 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/zfs_context.h>
27
30
28
31
int taskq_now ;
29
32
taskq_t * system_taskq ;
30
33
31
- typedef struct task {
32
- struct task * task_next ;
33
- struct task * task_prev ;
34
- task_func_t * task_func ;
35
- void * task_arg ;
36
- } task_t ;
37
-
38
34
#define TASKQ_ACTIVE 0x00010000
39
35
40
36
struct taskq {
@@ -51,18 +47,19 @@ struct taskq {
51
47
int tq_maxalloc ;
52
48
kcondvar_t tq_maxalloc_cv ;
53
49
int tq_maxalloc_wait ;
54
- task_t * tq_freelist ;
55
- task_t tq_task ;
50
+ taskq_ent_t * tq_freelist ;
51
+ taskq_ent_t tq_task ;
56
52
};
57
53
58
- static task_t *
54
+ static taskq_ent_t *
59
55
task_alloc (taskq_t * tq , int tqflags )
60
56
{
61
- task_t * t ;
57
+ taskq_ent_t * t ;
62
58
int rv ;
63
59
64
60
again : if ((t = tq -> tq_freelist ) != NULL && tq -> tq_nalloc >= tq -> tq_minalloc ) {
65
- tq -> tq_freelist = t -> task_next ;
61
+ ASSERT (!(t -> tqent_flags & TQENT_FLAG_PREALLOC ));
62
+ tq -> tq_freelist = t -> tqent_next ;
66
63
} else {
67
64
if (tq -> tq_nalloc >= tq -> tq_maxalloc ) {
68
65
if (!(tqflags & KM_SLEEP ))
@@ -87,25 +84,28 @@ again: if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
87
84
}
88
85
mutex_exit (& tq -> tq_lock );
89
86
90
- t = kmem_alloc (sizeof (task_t ), tqflags );
87
+ t = kmem_alloc (sizeof (taskq_ent_t ), tqflags );
91
88
92
89
mutex_enter (& tq -> tq_lock );
93
- if (t != NULL )
90
+ if (t != NULL ) {
91
+ /* Make sure we start without any flags */
92
+ t -> tqent_flags = 0 ;
94
93
tq -> tq_nalloc ++ ;
94
+ }
95
95
}
96
96
return (t );
97
97
}
98
98
99
99
static void
100
- task_free (taskq_t * tq , task_t * t )
100
+ task_free (taskq_t * tq , taskq_ent_t * t )
101
101
{
102
102
if (tq -> tq_nalloc <= tq -> tq_minalloc ) {
103
- t -> task_next = tq -> tq_freelist ;
103
+ t -> tqent_next = tq -> tq_freelist ;
104
104
tq -> tq_freelist = t ;
105
105
} else {
106
106
tq -> tq_nalloc -- ;
107
107
mutex_exit (& tq -> tq_lock );
108
- kmem_free (t , sizeof (task_t ));
108
+ kmem_free (t , sizeof (taskq_ent_t ));
109
109
mutex_enter (& tq -> tq_lock );
110
110
}
111
111
@@ -116,7 +116,7 @@ task_free(taskq_t *tq, task_t *t)
116
116
taskqid_t
117
117
taskq_dispatch (taskq_t * tq , task_func_t func , void * arg , uint_t tqflags )
118
118
{
119
- task_t * t ;
119
+ taskq_ent_t * t ;
120
120
121
121
if (taskq_now ) {
122
122
func (arg );
@@ -130,26 +130,77 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
130
130
return (0 );
131
131
}
132
132
if (tqflags & TQ_FRONT ) {
133
- t -> task_next = tq -> tq_task .task_next ;
134
- t -> task_prev = & tq -> tq_task ;
133
+ t -> tqent_next = tq -> tq_task .tqent_next ;
134
+ t -> tqent_prev = & tq -> tq_task ;
135
135
} else {
136
- t -> task_next = & tq -> tq_task ;
137
- t -> task_prev = tq -> tq_task .task_prev ;
136
+ t -> tqent_next = & tq -> tq_task ;
137
+ t -> tqent_prev = tq -> tq_task .tqent_prev ;
138
138
}
139
- t -> task_next -> task_prev = t ;
140
- t -> task_prev -> task_next = t ;
141
- t -> task_func = func ;
142
- t -> task_arg = arg ;
139
+ t -> tqent_next -> tqent_prev = t ;
140
+ t -> tqent_prev -> tqent_next = t ;
141
+ t -> tqent_func = func ;
142
+ t -> tqent_arg = arg ;
143
+
144
+ ASSERT (!(t -> tqent_flags & TQENT_FLAG_PREALLOC ));
145
+
143
146
cv_signal (& tq -> tq_dispatch_cv );
144
147
mutex_exit (& tq -> tq_lock );
145
148
return (1 );
146
149
}
147
150
151
+ int
152
+ taskq_empty_ent (taskq_ent_t * t )
153
+ {
154
+ return t -> tqent_next == NULL ;
155
+ }
156
+
157
+ void
158
+ taskq_init_ent (taskq_ent_t * t )
159
+ {
160
+ t -> tqent_next = NULL ;
161
+ t -> tqent_prev = NULL ;
162
+ t -> tqent_func = NULL ;
163
+ t -> tqent_arg = NULL ;
164
+ t -> tqent_flags = 0 ;
165
+ }
166
+
167
+ void
168
+ taskq_dispatch_ent (taskq_t * tq , task_func_t func , void * arg , uint_t flags ,
169
+ taskq_ent_t * t )
170
+ {
171
+ ASSERT (func != NULL );
172
+ ASSERT (!(tq -> tq_flags & TASKQ_DYNAMIC ));
173
+
174
+ /*
175
+ * Mark it as a prealloc'd task. This is important
176
+ * to ensure that we don't free it later.
177
+ */
178
+ t -> tqent_flags |= TQENT_FLAG_PREALLOC ;
179
+ /*
180
+ * Enqueue the task to the underlying queue.
181
+ */
182
+ mutex_enter (& tq -> tq_lock );
183
+
184
+ if (flags & TQ_FRONT ) {
185
+ t -> tqent_next = tq -> tq_task .tqent_next ;
186
+ t -> tqent_prev = & tq -> tq_task ;
187
+ } else {
188
+ t -> tqent_next = & tq -> tq_task ;
189
+ t -> tqent_prev = tq -> tq_task .tqent_prev ;
190
+ }
191
+ t -> tqent_next -> tqent_prev = t ;
192
+ t -> tqent_prev -> tqent_next = t ;
193
+ t -> tqent_func = func ;
194
+ t -> tqent_arg = arg ;
195
+ cv_signal (& tq -> tq_dispatch_cv );
196
+ mutex_exit (& tq -> tq_lock );
197
+ }
198
+
148
199
void
149
200
taskq_wait (taskq_t * tq )
150
201
{
151
202
mutex_enter (& tq -> tq_lock );
152
- while (tq -> tq_task .task_next != & tq -> tq_task || tq -> tq_active != 0 )
203
+ while (tq -> tq_task .tqent_next != & tq -> tq_task || tq -> tq_active != 0 )
153
204
cv_wait (& tq -> tq_wait_cv , & tq -> tq_lock );
154
205
mutex_exit (& tq -> tq_lock );
155
206
}
@@ -158,27 +209,32 @@ static void
158
209
taskq_thread (void * arg )
159
210
{
160
211
taskq_t * tq = arg ;
161
- task_t * t ;
212
+ taskq_ent_t * t ;
213
+ boolean_t prealloc ;
162
214
163
215
mutex_enter (& tq -> tq_lock );
164
216
while (tq -> tq_flags & TASKQ_ACTIVE ) {
165
- if ((t = tq -> tq_task .task_next ) == & tq -> tq_task ) {
217
+ if ((t = tq -> tq_task .tqent_next ) == & tq -> tq_task ) {
166
218
if (-- tq -> tq_active == 0 )
167
219
cv_broadcast (& tq -> tq_wait_cv );
168
220
cv_wait (& tq -> tq_dispatch_cv , & tq -> tq_lock );
169
221
tq -> tq_active ++ ;
170
222
continue ;
171
223
}
172
- t -> task_prev -> task_next = t -> task_next ;
173
- t -> task_next -> task_prev = t -> task_prev ;
224
+ t -> tqent_prev -> tqent_next = t -> tqent_next ;
225
+ t -> tqent_next -> tqent_prev = t -> tqent_prev ;
226
+ t -> tqent_next = NULL ;
227
+ t -> tqent_prev = NULL ;
228
+ prealloc = t -> tqent_flags & TQENT_FLAG_PREALLOC ;
174
229
mutex_exit (& tq -> tq_lock );
175
230
176
231
rw_enter (& tq -> tq_threadlock , RW_READER );
177
- t -> task_func (t -> task_arg );
232
+ t -> tqent_func (t -> tqent_arg );
178
233
rw_exit (& tq -> tq_threadlock );
179
234
180
235
mutex_enter (& tq -> tq_lock );
181
- task_free (tq , t );
236
+ if (!prealloc )
237
+ task_free (tq , t );
182
238
}
183
239
tq -> tq_nthreads -- ;
184
240
cv_broadcast (& tq -> tq_wait_cv );
@@ -217,8 +273,8 @@ taskq_create(const char *name, int nthreads, pri_t pri,
217
273
tq -> tq_nthreads = nthreads ;
218
274
tq -> tq_minalloc = minalloc ;
219
275
tq -> tq_maxalloc = maxalloc ;
220
- tq -> tq_task .task_next = & tq -> tq_task ;
221
- tq -> tq_task .task_prev = & tq -> tq_task ;
276
+ tq -> tq_task .tqent_next = & tq -> tq_task ;
277
+ tq -> tq_task .tqent_prev = & tq -> tq_task ;
222
278
tq -> tq_threadlist = kmem_alloc (nthreads * sizeof (kthread_t * ), KM_SLEEP );
223
279
224
280
if (flags & TASKQ_PREPOPULATE ) {
0 commit comments