/* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
/* gnome-vfs-thread-pool.c - Simple thread pool implementation

   Copyright (C) 2000 Eazel, Inc.

   The Gnome Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The Gnome Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the Gnome Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   Author: Pavel Cisler <pavel@eazel.com>
*/

#include <config.h>
#include "gnome-vfs-thread-pool.h"
#include "gnome-vfs-job-queue.h"
#include <libgnomevfs/gnome-vfs-job-limit.h>
#include <glib/glist.h>
#include <glib/gmessages.h>

#undef DEBUG_PRINT

#define GNOME_VFS_THREAD_STACK_SIZE (256 * 1024)

#if 0
#define DEBUG_PRINT(x) g_print x
#else
#define DEBUG_PRINT(x)
#endif

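/* Per-thread state.  entry_point/entry_data describe the job the thread
 * should run next; they are read and written only while holding
 * waiting_for_work_lock, and entry_point == NULL means the thread is idle.
 * exit_requested is set (under the same lock) when the pool shuts down.
 */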
typedef struct {
        GThread *thread;
        GMutex *waiting_for_work_lock;
        GCond *waiting_for_work_lock_condition;

        void *(* entry_point) (void *);
        void *entry_data;

        volatile gboolean exit_requested;
} GnomeVFSThreadState;

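/* Pool of idle threads.  available_threads holds the GnomeVFSThreadState
 * of each parked thread and thread_count is its length; both are guarded
 * by thread_list_lock.
 */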
static GStaticMutex thread_list_lock = G_STATIC_MUTEX_INIT;

static const int MAX_AVAILABLE_THREADS = 20;
static GList *available_threads;
static int thread_count;

static void *thread_entry (void *cast_to_state);
static void destroy_thread_state (GnomeVFSThreadState *state);

void
_gnome_vfs_thread_pool_init (void)
{
        /* Nothing to set up; worker threads are created lazily by
         * _gnome_vfs_thread_create as jobs arrive.
         */
}

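/* Allocate bookkeeping for a new worker and spawn the thread itself.
 * Returns NULL if the thread could not be created.
 */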
static GnomeVFSThreadState *
new_thread_state (void)
{
        GnomeVFSThreadState *state;
        GError *error;

        state = g_new0 (GnomeVFSThreadState, 1);

        state->waiting_for_work_lock = g_mutex_new ();
        state->waiting_for_work_lock_condition = g_cond_new ();

        error = NULL;

        /* Spawn a new thread that calls the entry point immediately -- it
         * will block until it receives an entry_point for the first job to
         * execute.
         */
        state->thread = g_thread_create_full (thread_entry, state,
                                              GNOME_VFS_THREAD_STACK_SIZE,
                                              FALSE, FALSE,
                                              G_THREAD_PRIORITY_NORMAL, &error);

        DEBUG_PRINT (("new thread %p\n", state->thread));

        if (state->thread == NULL) {
                /* Thread creation failed; free the error (if any) and the
                 * half-constructed state instead of leaking them.
                 */
                if (error != NULL) {
                        g_error_free (error);
                }
                destroy_thread_state (state);
                return NULL;
        }

        return state;
}

static void
destroy_thread_state (GnomeVFSThreadState *state)
{
        g_mutex_free (state->waiting_for_work_lock);
        g_cond_free (state->waiting_for_work_lock_condition);
        g_free (state);
}

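/* Called by a worker that has just finished a job: return it to the idle
 * pool if there is room.  Returns FALSE if the pool is full, in which case
 * the caller should let the thread expire.
 */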
static gboolean
make_thread_available (GnomeVFSThreadState *state)
{
        /* The thread is done with its work, add it to the available pool. */
        gboolean delete_thread = TRUE;
        int job_limit;

        g_mutex_lock (state->waiting_for_work_lock);
        /* We are done with the last task, clear it out. */
        state->entry_point = NULL;
        g_mutex_unlock (state->waiting_for_work_lock);

        g_static_mutex_lock (&thread_list_lock);

        job_limit = gnome_vfs_async_get_job_limit ();
        if (thread_count < MIN (MAX_AVAILABLE_THREADS, job_limit)) {
                /* Haven't hit the max thread limit yet, add the now available
                 * thread to the pool.
                 */
                available_threads = g_list_prepend (available_threads, state);
                thread_count++;
                delete_thread = FALSE;
                DEBUG_PRINT (("adding thread %p to the pool, %d threads\n",
                              state->thread, thread_count));
        }

        g_static_mutex_unlock (&thread_list_lock);

        return !delete_thread;
}

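/* Park the calling worker until _gnome_vfs_thread_create hands it a new
 * entry point, or until shutdown asks it to exit.
 */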
static void
gnome_vfs_thread_pool_wait_for_work (GnomeVFSThreadState *state)
{
        /* FIXME: The Eazel profiler should be taught about this call
         * and ignore any timings it collects from the program hanging out
         * in here.
         */

        /* Wait to get scheduled to do some work. */
        DEBUG_PRINT (("thread %p getting ready to wait for work\n",
                      state->thread));

        g_mutex_lock (state->waiting_for_work_lock);
        if (state->entry_point != NULL) {
                DEBUG_PRINT (("thread %p ready to work right away\n",
                              state->thread));
        } else {
                while (state->entry_point == NULL && !state->exit_requested) {
                        /* Don't have any work yet, wait till we get some
                         * or until we are asked to exit -- without the
                         * exit_requested check, a shutdown signal would wake
                         * us only to go right back to sleep.
                         */
                        DEBUG_PRINT (("thread %p waiting for work\n", state->thread));
                        g_cond_wait (state->waiting_for_work_lock_condition,
                                     state->waiting_for_work_lock);
                }
        }

        g_mutex_unlock (state->waiting_for_work_lock);
        DEBUG_PRINT (("thread %p woken up\n", state->thread));
}

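/* Main loop of every pooled thread: park until work arrives, run it, then
 * either rejoin the pool or expire.
 */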
static void *
thread_entry (void *cast_to_state)
{
        GnomeVFSThreadState *state = (GnomeVFSThreadState *)cast_to_state;

        for (;;) {

                if (state->exit_requested) {
                        /* We have been explicitly asked to expire. */
                        break;
                }

                gnome_vfs_thread_pool_wait_for_work (state);

                if (state->exit_requested) {
                        /* Shutdown was requested while we were parked
                         * waiting for work; expire without running a job.
                         */
                        break;
                }

                g_assert (state->entry_point);

                /* Enter the actual thread entry point. */
                (*state->entry_point) (state->entry_data);

                if (!make_thread_available (state)) {
                        /* Available thread pool is full of threads, just let this one
                         * expire.
                         */
                        break;
                }

                /* We're finished with this job, so run the job queue scheduler
                 * to start a new job if the queue is not empty.
                 */
                _gnome_vfs_job_queue_run ();
        }

        destroy_thread_state (state);
        return NULL;
}

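/* Schedule thread_routine (thread_arguments) on a pooled thread, creating
 * a new thread if none is idle.  Returns 0 on success, -1 if a new thread
 * could not be created.
 */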
int
_gnome_vfs_thread_create (void *(* thread_routine) (void *),
                          void *thread_arguments)
{
        GnomeVFSThreadState *available_thread;

        g_static_mutex_lock (&thread_list_lock);
        if (available_threads == NULL) {
                /* Thread pool empty, create a new thread. */
                available_thread = new_thread_state ();
        } else {
                /* Pick the next available thread from the list. */
                available_thread = (GnomeVFSThreadState *)available_threads->data;
                available_threads = g_list_remove (available_threads, available_thread);
                thread_count--;
                DEBUG_PRINT (("got thread %p from the pool, %d threads left\n",
                              available_thread->thread, thread_count));
        }
        g_static_mutex_unlock (&thread_list_lock);

        if (available_thread == NULL) {
                /* Failed to allocate a new thread. */
                return -1;
        }

        /* Lock it so we can condition-signal it next. */
        g_mutex_lock (available_thread->waiting_for_work_lock);

        /* Prepare work for the thread. */
        available_thread->entry_point = thread_routine;
        available_thread->entry_data = thread_arguments;

        /* Unleash the thread. */
        DEBUG_PRINT (("waking up thread %p\n", available_thread->thread));
        g_cond_signal (available_thread->waiting_for_work_lock_condition);
        g_mutex_unlock (available_thread->waiting_for_work_lock);

        return 0;
}

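/* Ask every thread that is parked in the idle pool to exit.  Threads that
 * are still running a job at this moment are not signaled or waited for;
 * only the currently idle ones are told to expire.
 */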
void
_gnome_vfs_thread_pool_shutdown (void)
{
        GnomeVFSThreadState *thread_state;

        for (;;) {
                thread_state = NULL;

                g_static_mutex_lock (&thread_list_lock);
                if (available_threads != NULL) {
                        /* Pick the next thread from the list. */
                        thread_state = (GnomeVFSThreadState *)available_threads->data;
                        available_threads = g_list_remove (available_threads, thread_state);
                        /* Keep thread_count in sync with the list. */
                        thread_count--;
                }
                g_static_mutex_unlock (&thread_list_lock);

                if (thread_state == NULL) {
                        break;
                }

                g_mutex_lock (thread_state->waiting_for_work_lock);
                /* Tell the thread to expire. */
                thread_state->exit_requested = TRUE;
                g_cond_signal (thread_state->waiting_for_work_lock_condition);
                g_mutex_unlock (thread_state->waiting_for_work_lock);
        }
}

266