From b35211fd58afcf430a0d95a243dc7a086d72b2b8 Mon Sep 17 00:00:00 2001
From: Jaco Kroon <jaco@uls.co.za>
Date: Fri, 21 May 2021 20:11:59 +0200
Subject: [PATCH] func_lock: replace the lock broker thread with per-lock condition signalling (replacement patch for v13).
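
This replaces the global lock broker thread and the per-lock ao2 container of
requester channels: lock hand-over is now done by signalling each lock's
condition variable directly from UNLOCK and from the datastore destructor,
with a plain requester counter protected by the lock's mutex.  Each channel
datastore now holds a module reference so that unload_module() can wait for
every lock to be released and every requester to drain, and a "locks show"
CLI command is added.

For context, a minimal dialplan sketch of the LOCK/UNLOCK usage this code
serves (the extension number and lock name below are illustrative only, not
part of this patch):

    exten => 100,1,Set(got_lock=${LOCK(db-critical)})   ; "1" on success, "0" on failure
     same => n,GotoIf($["${got_lock}" != "1"]?giveup)
     same => n,Verbose(1,Doing serialized work while holding db-critical)
     same => n,Set(released=${UNLOCK(db-critical)})
     same => n(giveup),Hangup()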

Change-Id: I30236d7d7229f317c681fb7c6d7944d6108acd08
---
 funcs/func_lock.c | 234 +++++++++++++++++++++++++---------------------
 1 file changed, 126 insertions(+), 108 deletions(-)

diff --git a/funcs/func_lock.c b/funcs/func_lock.c
index a006a574eb..c472504f52 100644
--- a/funcs/func_lock.c
+++ b/funcs/func_lock.c
@@ -44,6 +44,7 @@ ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
 #include "asterisk/linkedlists.h"
 #include "asterisk/astobj2.h"
 #include "asterisk/utils.h"
+#include "asterisk/cli.h"
 
 /*** DOCUMENTATION
 	<function name="LOCK" language="en_US">
@@ -112,7 +113,6 @@ static AST_LIST_HEAD_STATIC(locklist, lock_frame);
 static void lock_free(void *data);
 static void lock_fixup(void *data, struct ast_channel *oldchan, struct ast_channel *newchan);
 static int unloading = 0;
-static pthread_t broker_tid = AST_PTHREADT_NULL;
 
 static const struct ast_datastore_info lock_info = {
 	.type = "MUTEX",
@@ -126,8 +126,8 @@ struct lock_frame {
 	ast_cond_t cond;
 	/*! count is needed so if a recursive mutex exits early, we know how many times to unlock it. */
 	unsigned int count;
-	/*! Container of requesters for the named lock */
-	struct ao2_container *requesters;
+	/*! Count of waiting requesters for the named lock */
+	unsigned int requesters;
 	/*! who owns us */
 	struct ast_channel *owner;
 	/*! name of the lock */
@@ -149,14 +149,19 @@ static void lock_free(void *data)
 	while ((clframe = AST_LIST_REMOVE_HEAD(oldlist, list))) {
 		/* Only unlock if we own the lock */
 		if (clframe->channel == clframe->lock_frame->owner) {
+			ast_mutex_lock(&clframe->lock_frame->mutex);
 			clframe->lock_frame->count = 0;
 			clframe->lock_frame->owner = NULL;
+			ast_cond_signal(&clframe->lock_frame->cond);
+			ast_mutex_unlock(&clframe->lock_frame->mutex);
 		}
 		ast_free(clframe);
 	}
 	AST_LIST_UNLOCK(oldlist);
 	AST_LIST_HEAD_DESTROY(oldlist);
 	ast_free(oldlist);
+
+	ast_module_unref(ast_module_info->self);
 }
 
 static void lock_fixup(void *data, struct ast_channel *oldchan, struct ast_channel *newchan)
@@ -175,54 +180,11 @@ static void lock_fixup(void *data, struct ast_channel *oldchan, struct ast_chann
 		if (clframe->lock_frame->owner == oldchan) {
 			clframe->lock_frame->owner = newchan;
 		}
-		/* We don't move requesters, because the thread stack is different */
 		clframe->channel = newchan;
 	}
 	AST_LIST_UNLOCK(list);
 }
 
-static void *lock_broker(void *unused)
-{
-	struct lock_frame *frame;
-	struct timespec forever = { 1000000, 0 };
-	for (;;) {
-		int found_requester = 0;
-
-		/* Test for cancel outside of the lock */
-		pthread_testcancel();
-		AST_LIST_LOCK(&locklist);
-
-		AST_LIST_TRAVERSE(&locklist, frame, entries) {
-			if (ao2_container_count(frame->requesters)) {
-				found_requester++;
-				ast_mutex_lock(&frame->mutex);
-				if (!frame->owner) {
-					ast_cond_signal(&frame->cond);
-				}
-				ast_mutex_unlock(&frame->mutex);
-			}
-		}
-
-		AST_LIST_UNLOCK(&locklist);
-		pthread_testcancel();
-
-		/* If there are no requesters, then wait for a signal */
-		if (!found_requester) {
-			nanosleep(&forever, NULL);
-		} else {
-			sched_yield();
-		}
-	}
-	/* Not reached */
-	return NULL;
-}
-
-static int ast_channel_cmp_cb(void *obj, void *arg, int flags)
-{
-	struct ast_channel *chan = obj, *cmp_args = arg;
-	return strcasecmp(ast_channel_name(chan), ast_channel_name(cmp_args)) ? 0 : CMP_MATCH;
-}
-
 static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 {
 	struct ast_datastore *lock_store = ast_channel_datastore_find(chan, &lock_info, NULL);
@@ -234,7 +196,12 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 	struct timeval now;
 
 	if (!lock_store) {
-		ast_debug(1, "Channel %s has no lock datastore, so we're allocating one.\n", ast_channel_name(chan));
+		if (unloading) {
+			ast_log(LOG_ERROR, "%sLOCK has no datastore and func_lock is unloading, failing.\n",
+					trylock ? "TRY" : "");
+			return -1;
+		}
+
 		lock_store = ast_datastore_alloc(&lock_info, NULL);
 		if (!lock_store) {
 			ast_log(LOG_ERROR, "Unable to allocate new datastore.  No locks will be obtained.\n");
@@ -253,6 +220,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 		lock_store->data = list;
 		AST_LIST_HEAD_INIT(list);
 		ast_channel_datastore_add(chan, lock_store);
+
+		/* We cannot unload until this channel has released the lock_store */
+		ast_module_ref(ast_module_info->self);
 	} else
 		list = lock_store->data;
 
@@ -266,6 +236,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 
 	if (!current) {
 		if (unloading) {
+			ast_log(LOG_ERROR,
+				"Lock doesn't exist whilst unloading.  %sLOCK will fail.\n",
+				trylock ? "TRY" : "");
 			/* Don't bother */
 			AST_LIST_UNLOCK(&locklist);
 			return -1;
@@ -292,17 +265,12 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 			AST_LIST_UNLOCK(&locklist);
 			return -1;
 		}
-		current->requesters = ao2_container_alloc_list(AO2_ALLOC_OPT_LOCK_MUTEX, 0,
-			NULL, ast_channel_cmp_cb);
-		if (!current->requesters) {
-			ast_mutex_destroy(&current->mutex);
-			ast_cond_destroy(&current->cond);
-			ast_free(current);
-			AST_LIST_UNLOCK(&locklist);
-			return -1;
-		}
 		AST_LIST_INSERT_TAIL(&locklist, current, entries);
 	}
+	/* Add to requester list */
+	ast_mutex_lock(&current->mutex);
+	current->requesters++;
+	ast_mutex_unlock(&current->mutex);
 	AST_LIST_UNLOCK(&locklist);
 
 	/* Found lock or created one - now find or create the corresponding link in the channel */
@@ -315,7 +283,13 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 
 	if (!clframe) {
 		if (unloading) {
+			ast_log(LOG_ERROR,
+				"Busy unloading.  %sLOCK will fail.\n",
+				trylock ? "TRY" : "");
 			/* Don't bother */
+			ast_mutex_lock(&current->mutex);
+			current->requesters--;
+			ast_mutex_unlock(&current->mutex);
 			AST_LIST_UNLOCK(list);
 			return -1;
 		}
@@ -324,6 +298,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 			ast_log(LOG_ERROR,
 				"Unable to allocate channel lock frame.  %sLOCK will fail.\n",
 				trylock ? "TRY" : "");
+			ast_mutex_lock(&current->mutex);
+			current->requesters--;
+			ast_mutex_unlock(&current->mutex);
 			AST_LIST_UNLOCK(list);
 			return -1;
 		}
@@ -339,44 +316,44 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
 	 * the same amount, before we'll release this one.
 	 */
 	if (current->owner == chan) {
+		/* We're not a requester, we already have it */
+		ast_mutex_lock(&current->mutex);
+		current->requesters--;
+		ast_mutex_unlock(&current->mutex);
 		current->count++;
 		return 0;
 	}
 
-	/* Okay, we have both frames, so now we need to try to lock.
-	 *
-	 * Locking order: always lock locklist first.  We need the
-	 * locklist lock because the broker thread counts whether
-	 * there are requesters with the locklist lock held, and we
-	 * need to hold it, so that when we send our signal, below,
-	 * to wake up the broker thread, it definitely will see that
-	 * a requester exists at that point in time.  Otherwise, we
-	 * could add to the requesters after it has already seen that
-	 * that lock is unoccupied and wait forever for another signal.
-	 */
-	AST_LIST_LOCK(&locklist);
-	ast_mutex_lock(&current->mutex);
-	/* Add to requester list */
-	ao2_link(current->requesters, chan);
-	pthread_kill(broker_tid, SIGURG);
-	AST_LIST_UNLOCK(&locklist);
-
 	/* Wait up to three seconds from now for LOCK. */
 	now = ast_tvnow();
 	timeout.tv_sec = now.tv_sec + 3;
 	timeout.tv_nsec = now.tv_usec * 1000;
 
-	if (!current->owner
-		|| (!trylock
-			&& !(res = ast_cond_timedwait(&current->cond, &current->mutex, &timeout)))) {
-		res = 0;
+	ast_mutex_lock(&current->mutex);
+
+	res = 0;
+	while (!trylock && !res && current->owner) {
+		res = ast_cond_timedwait(&current->cond, &current->mutex, &timeout);
+	}
+	if (current->owner) {
+		ast_log(LOG_ERROR, "%sLOCK failed to obtain lock %s.\n", trylock ? "TRY" : "",
+				lockname);
+		/* timeout;
+		 * trylock; or
+		 * cond_timedwait failed.
+		 *
+		 * either way, we fail to obtain the lock.
+		 */
+		res = -1;
+	} else {
 		current->owner = chan;
 		current->count++;
-	} else {
-		res = -1;
+		res = 0;
 	}
 	/* Remove from requester list */
-	ao2_unlink(current->requesters, chan);
+	current->requesters--;
+	if (res && unloading)
+		ast_cond_signal(&current->cond);
 	ast_mutex_unlock(&current->mutex);
 
 	return res;
@@ -400,7 +377,7 @@ static int unlock_read(struct ast_channel *chan, const char *cmd, char *data, ch
 	}
 
 	if (!(list = lock_store->data)) {
-		ast_debug(1, "This should NEVER happen\n");
+		ast_log(LOG_ERROR, "Datastore's data member is NULL ... this should be impossible.\n");
 		ast_copy_string(buf, "0", len);
 		return 0;
 	}
@@ -419,12 +396,17 @@ static int unlock_read(struct ast_channel *chan, const char *cmd, char *data, ch
 
 	if (!clframe) {
 		/* We didn't have this lock in the first place */
+		ast_log(LOG_WARNING, "Attempting to UNLOCK(%s) - a lock this channel never owned.\n",
+				data);
 		ast_copy_string(buf, "0", len);
 		return 0;
 	}
 
 	if (--clframe->lock_frame->count == 0) {
+		ast_mutex_lock(&clframe->lock_frame->mutex);
 		clframe->lock_frame->owner = NULL;
+		ast_cond_signal(&clframe->lock_frame->cond);
+		ast_mutex_unlock(&clframe->lock_frame->mutex);
 	}
 
 	ast_copy_string(buf, "1", len);
@@ -455,6 +437,37 @@ static int trylock_read(struct ast_channel *chan, const char *cmd, char *data, c
 	return 0;
 }
 
+static char *handle_cli_locks_show(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
+{
+	int c = 0;
+	struct lock_frame *current;
+	switch (cmd) {
+	case CLI_INIT:
+		e->command = "locks show";
+		e->usage =
+			"Usage: locks show\n"
+			"       List all locks known to func_lock, along with their current status.\n";
+		return NULL;
+	case CLI_GENERATE:
+		return NULL;
+	}
+
+	ast_cli(a->fd, "func_lock locks:\n");
+	ast_cli(a->fd, "%-40s Requesters Owner\n", "Name");
+	AST_LIST_LOCK(&locklist);
+	AST_LIST_TRAVERSE(&locklist, current, entries) {
+		ast_mutex_lock(&current->mutex);
+		ast_cli(a->fd, "%-40s %-10d %s\n", current->name, current->requesters,
+				current->owner ? ast_channel_name(current->owner) : "(unlocked)");
+		ast_mutex_unlock(&current->mutex);
+		c++;
+	}
+	AST_LIST_UNLOCK(&locklist);
+	ast_cli(a->fd, "%d total locks listed.\n", c);
+
+	return CLI_SUCCESS;
+}
+
 static struct ast_custom_function lock_function = {
 	.name = "LOCK",
 	.read = lock_read,
@@ -473,6 +486,8 @@ static struct ast_custom_function unlock_function = {
 	.read_max = 2,
 };
 
+static struct ast_cli_entry cli_locks_show = AST_CLI_DEFINE(handle_cli_locks_show, "List func_lock locks.");
+
 static int unload_module(void)
 {
 	struct lock_frame *current;
@@ -480,34 +495,43 @@ static int unload_module(void)
 	/* Module flag */
 	unloading = 1;
 
+	/* Make it impossible for new requesters to be added
+	 * NOTE:  channels could already be in get_lock() */
+	ast_custom_function_unregister(&lock_function);
+	ast_custom_function_unregister(&trylock_function);
+
+	ast_cli_unregister(&cli_locks_show);
+
 	AST_LIST_LOCK(&locklist);
 	while ((current = AST_LIST_REMOVE_HEAD(&locklist, entries))) {
-		/* If any locks are currently in use, then we cannot unload this module */
-		if (current->owner || ao2_container_count(current->requesters)) {
-			/* Put it back */
-			AST_LIST_INSERT_HEAD(&locklist, current, entries);
-			AST_LIST_UNLOCK(&locklist);
-			unloading = 0;
-			return -1;
+		int warned = 0;
+		ast_mutex_lock(&current->mutex);
+		while (current->owner || current->requesters) {
+			if (!warned) {
+				ast_log(LOG_WARNING, "Waiting for %d requesters for %s lock %s.\n",
+						current->requesters, current->owner ? "locked" : "unlocked",
+						current->name);
+				warned = 1;
+			}
+			/* either the named lock is still held, or other channels are still
+			 * inside get_lock(); wait for all of those to clear first */
+			ast_cond_wait(&current->cond, &current->mutex);
 		}
+		ast_mutex_unlock(&current->mutex);
+		/* At this point we know:
+		 * 1. the lock has been released,
+		 * 2. there are no requesters (nor should any be able to sneak in).
+		 */
 		ast_mutex_destroy(&current->mutex);
-		ao2_ref(current->requesters, -1);
+		ast_cond_destroy(&current->cond);
 		ast_free(current);
 	}
+	AST_LIST_UNLOCK(&locklist);
+	AST_LIST_HEAD_DESTROY(&locklist);
 
-	/* No locks left, unregister functions */
-	ast_custom_function_unregister(&lock_function);
-	ast_custom_function_unregister(&trylock_function);
+	/* At this point we can safely stop access to UNLOCK */
 	ast_custom_function_unregister(&unlock_function);
 
-	if (broker_tid != AST_PTHREADT_NULL) {
-		pthread_cancel(broker_tid);
-		pthread_kill(broker_tid, SIGURG);
-		pthread_join(broker_tid, NULL);
-	}
-
-	AST_LIST_UNLOCK(&locklist);
-
 	return 0;
 }
 
@@ -516,13 +540,7 @@ static int load_module(void)
 	int res = ast_custom_function_register_escalating(&lock_function, AST_CFE_READ);
 	res |= ast_custom_function_register_escalating(&trylock_function, AST_CFE_READ);
 	res |= ast_custom_function_register_escalating(&unlock_function, AST_CFE_READ);
-
-	if (ast_pthread_create_background(&broker_tid, NULL, lock_broker, NULL)) {
-		ast_log(LOG_ERROR, "Failed to start lock broker thread. Unloading func_lock module.\n");
-		broker_tid = AST_PTHREADT_NULL;
-		unload_module();
-		return AST_MODULE_LOAD_DECLINE;
-	}
+	res |= ast_cli_register(&cli_locks_show);
 
 	return res;
 }
-- 
2.26.3