path: root/lib/git/async/pool.py
"""Implementation of a thread-pool working with channels"""
from thread import WorkerThread

from threading import (
	Lock, 
	_Condition, 
	_sleep,
	_time,
	)

from task import InputChannelTask
from Queue import Queue, Empty
from collections import deque

from graph import (
		Graph, 
	)

from channel import (
		Channel,
		WChannel, 
		RChannel
	)

import weakref
import sys


#{ Utilities

class SyncQueue(deque):
	"""Adapter to allow using a deque like a queue, without locking"""
	def get(self, block=True, timeout=None):
		try:
			return self.pop()
		except IndexError:
			raise Empty
		# END raise empty
			
	def empty(self):
		return len(self) == 0
		
	put = deque.append
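
# A minimal usage sketch (illustrative, not part of the original module):
# SyncQueue behaves like a lock-free Queue for single-threaded, serial use.
#
#	q = SyncQueue()
#	q.put(1)
#	q.put(2)
#	q.get()			# -> 2, as put/pop work on the same end (LIFO order)
#	q.empty()		# -> False
#	q.get()			# -> 1; another get() would raise Empty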
	
	
class HSCondition(_Condition):
	"""An attempt to make conditions less blocking, which gains performance 
	in return by sleeping less"""
	delay = 0.00002		# reduces wait times, but increases overhead
	
	def wait(self, timeout=None):
		waiter = Lock()
		waiter.acquire()
		self.__dict__['_Condition__waiters'].append(waiter)
		saved_state = self._release_save()
		try:	# restore state no matter what (e.g., KeyboardInterrupt)
			if timeout is None:
				waiter.acquire()
			else:
				# Balancing act:  We can't afford a pure busy loop, so we
				# have to sleep; but if we sleep the whole timeout time,
				# we'll be unresponsive.  The scheme here sleeps very
				# little at first, longer as time goes on, but never longer
				# than 20 times per second (or the timeout time remaining).
				endtime = _time() + timeout
				delay = self.delay
				acquire = waiter.acquire
				while True:
					gotit = acquire(0)
					if gotit:
						break
					remaining = endtime - _time()
					if remaining <= 0:
						break
					delay = min(delay * 2, remaining, .05)
					_sleep(delay)
				# END endless loop
				if not gotit:
					try:
						self.__dict__['_Condition__waiters'].remove(waiter)
					except ValueError:
						pass
				# END didn't ever get it
		finally:
			self._acquire_restore(saved_state)
			
	def notify(self, n=1):
		__waiters = self.__dict__['_Condition__waiters']
		if not __waiters:
			return
		if n == 1:
			__waiters[0].release()
			try:
				__waiters.pop(0)
			except IndexError:
				pass
		else:
			waiters = __waiters[:n]
			for waiter in waiters:
				waiter.release()
				try:
					__waiters.remove(waiter)
				except ValueError:
					pass
		# END handle n = 1 case faster
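
# A minimal sketch of the backoff schedule wait() uses above (illustrative,
# not part of the original module): each failed acquire doubles the sleep,
# capped at 0.05s and at the time remaining.
#
#	delay = HSCondition.delay
#	schedule = []
#	while delay < 0.05:
#		schedule.append(delay)
#		delay = min(delay * 2, 0.05)
#	# schedule == [2e-05, 4e-05, 8e-05, ...] - a waiting thread thus wakes
#	# at most ~20 times per second once the cap is reached.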
	
class PerfQueue(Queue):
	"""A queue using different condition objects to gain multithreading performance"""
	def __init__(self, maxsize=0):
		Queue.__init__(self, maxsize)
		
		self.not_empty = HSCondition(self.mutex)
		self.not_full = HSCondition(self.mutex)
		self.all_tasks_done = HSCondition(self.mutex)
		
	
#} END utilities
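
# PerfQueue is intended as a drop-in replacement for Queue.Queue; a minimal
# usage sketch (illustrative only):
#
#	q = PerfQueue()
#	q.put(('some', 'item'))
#	q.get(True, 0.1)	# blocks via HSCondition instead of threading.Condition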
	
class RPoolChannel(RChannel):
	""" A read-only pool channel may not be wrapped or derived from, but it provides slots to call
	before and after an item is to be read.
	
	It acts like a handle to the underlying task in the pool."""
	__slots__ = ('_task', '_pool', '_pre_cb', '_post_cb')
	
	def __init__(self, wchannel, task, pool):
		RChannel.__init__(self, wchannel)
		self._task = task
		self._pool = pool
		self._pre_cb = None
		self._post_cb = None
		
	def __del__(self):
		"""Assures that our task will be deleted if we were the last reader"""
		del(self._wc)		# decrement ref-count
		self._pool._del_task_if_orphaned(self._task)
	
	def set_pre_cb(self, fun = lambda count: None):
		"""Install a callback to call with the item count to be read before any 
		item is actually  read from the channel.
		If it fails, the read will fail with an IOError
		If a function is not provided, the call is effectively uninstalled."""
		self._pre_cb = fun
	
	def set_post_cb(self, fun = lambda item: item):
		"""Install a callback to call after the items were read. The function
		returns a possibly changed item list. If it raises, the exception will be propagated.
		If a function is not provided, the call is effectively uninstalled."""
		self._post_cb = fun
	
	def read(self, count=0, block=True, timeout=None):
		"""Read an item that was processed by one of our threads
		:note: Triggers task dependency handling needed to provide the necessary 
			input"""
		if self._pre_cb:
			self._pre_cb(count)
		# END pre callback
		
		# if we have count items, don't do any queue preparation - if someone
		# depletes the queue in the meanwhile, the channel will close and 
		# we will unblock naturally
		have_enough = False
		if count > 0:
			# explicitly > count, as we want a certain safe range
			have_enough = self._wc._queue.qsize() > count
		# END risky game
		
		########## prepare ##############################
		if not have_enough:
			self._pool._prepare_channel_read(self._task, count)
		
		
		######### read data ######
		# read actual items - tasks were set up to put their output into our channel as well
		items = RChannel.read(self, count, block, timeout)
		
		if self._post_cb:
			items = self._post_cb(items)
			
		
		####### Finalize ########
		self._pool._post_channel_read(self._task)
			
		return items
		
	#{ Internal
	def _read(self, count=0, block=False, timeout=None):
		"""Calls the underlying channel's read directly, without triggering 
		the pool"""
		return RChannel.read(self, count, block, timeout)
	
	#} END internal
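
# A minimal sketch of the callback hooks (illustrative; `rc` stands for the
# RPoolChannel returned by Pool.add_task below):
#
#	def log_request(count):
#		print "about to read %i items" % count
#
#	def tag_items(items):
#		return [(item, 'seen') for item in items]
#
#	rc.set_pre_cb(log_request)	# called with the requested item count
#	rc.set_post_cb(tag_items)	# may return a changed item list
#	items = rc.read(10)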
	
	
class Pool(object):
	"""A thread pool maintains a set of one or more worker threads, but supports 
	a fully serial mode in which case the amount of threads is zero.
	
	Work is distributed via Channels, which form a dependency graph. The evaluation
	is lazy, as work will only be done once an output is requested.
	
	The thread pool's inherent issue is the global interpreter lock that it will hit,
	which gets worse considering that a few C extensions specifically lock their part
	globally as well. The only way this will improve is if custom C extensions
	are written which do some bulk work, but release the GIL once they have acquired
	their resources.
	
	Due to the nature of having multiple objects in git, it's easy to distribute
	that work cleanly among threads.
	
	:note: the current implementation returns channels which are meant to be 
		used only from the main thread, hence you cannot consume their results 
		from multiple threads unless you use a task for it."""
	__slots__ = (	'_tasks',				# a graph of tasks
					'_consumed_tasks',		# a queue with tasks that are done or had an error
					'_workers',				# list of worker threads
					'_queue', 				# master queue for tasks
					'_taskorder_cache', 	# map task id -> ordered dependent tasks
					'_taskgraph_lock',		# lock for accessing the task graph
				)
	
	# CONFIGURATION
	# The type of worker to create - it's expected to provide the Thread interface,
	# taking the task queue as its only init argument,
	# as well as a method called stop_and_join() to terminate it
	WorkerCls = None
	
	# The type of lock to use to protect critical sections, providing the 
	# threading.Lock interface
	LockCls = None
	
	# the type of the task queue to use - it must provide the Queue interface
	TaskQueueCls = None
	
	
	def __init__(self, size=0):
		self._tasks = Graph()
		self._consumed_tasks = None
		self._workers = list()
		self._queue = SyncQueue()		# start with a sync queue
		self._taskgraph_lock = self.LockCls()
		self._taskorder_cache = dict()
		self.set_size(size)
		
	def __del__(self):
		self.set_size(0)
	
	#{ Internal
		
	def _prepare_channel_read(self, task, count):
		"""Process the tasks which depend on the given one to be sure the input 
		channels are filled with data once we process the actual task
		
		Tasks have two important states: either they are done, or they are done
		and have an error, in which case they are likely not to have finished all their work.
		
		Either way, we will put them onto a list of tasks to delete, providing
		information about the failed ones.
		
		Tasks which are not done will be put onto the queue for processing, which 
		is fine as we walked them depth-first."""
		# for the walk, we must make sure the ordering does not change. Even 
		# when accessing the cache, as it is related to graph changes
		self._taskgraph_lock.acquire()
		try:
			try:
				dfirst_tasks = self._taskorder_cache[id(task)]
			except KeyError:
				# have to retrieve the list from the graph
				dfirst_tasks = list()
				self._tasks.visit_input_inclusive_depth_first(task, lambda n: dfirst_tasks.append(n))
				self._taskorder_cache[id(task)] = dfirst_tasks
			# END handle cached order retrieval
		finally:
			self._taskgraph_lock.release()
		# END handle locking
		
		# check the min count on all involved tasks, and be sure that we don't 
		# have any task which produces less than the maximum min-count of all tasks
		# The actual_count is used when chunking tasks up for the queue, whereas
		# the count is used to determine whether we still have enough output
		# on the queue, checking qsize (-> revise)
		# ABSTRACT: If T depends on T-1, and the client wants 1 item, T produces
		# at least 10, T-1 goes with 1, then T will block after 1 item, which 
		# is read by the client. On the next read of 1 item, we would find T's 
		# queue empty and put in another 10, which could put another thread into 
		# blocking state. T-1 produces one more item, which is consumed right away
		# by the two threads running T. Although this works in the end, it leaves
		# many threads blocking and waiting for input, which is not desired.
		# Setting the min-count to the max of the mincount of all tasks assures
		# we have enough items for all.
		# Addition: in serial mode, we would enter a deadlock if one task would
		# ever wait for items !
		actual_count = count
		min_counts = (((t.min_count is not None and t.min_count) or count) for t in dfirst_tasks)
		min_count = max(min_counts)
		if 0 < count < min_count:
			actual_count = min_count
		# END set actual count
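		# Worked example (illustrative numbers): with three involved tasks whose
		# min_counts are 5, None and 10, a request of count=2 yields
		# min_count = max(5, 2, 10) = 10, so actual_count becomes 10.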
		
		# the list includes our tasks - the first one to evaluate first, the 
		# requested one last
		for task in dfirst_tasks: 
			if task.error() or task.is_done():
				self._consumed_tasks.put(task)
				continue
			# END skip processing
			
			# if the task does not have the required output on its queue, schedule
			# it for processing. If we should process all, we don't care about the 
			# amount as it should process until it's all done.
			#if count > 1 and task._out_wc.size() >= count:
			#	continue
			# END skip if we have enough
			
			# but use the actual count to produce the output - we may produce
			# more than requested
			numchunks = 1
			chunksize = actual_count
			remainder = 0
			
			# we need the count set for this - can't chunk up unlimited items
			# In serial mode we could do this by checking for empty input channels, 
			# but in dispatch mode it's impossible (== not easily possible)
			# Only try it if we have enough demand
			if task.max_chunksize and actual_count > task.max_chunksize:
				numchunks = actual_count / task.max_chunksize
				chunksize = task.max_chunksize
				remainder = actual_count - (numchunks * chunksize)
			# END handle chunking
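			# Worked example (illustrative numbers): actual_count=25 with
			# max_chunksize=10 gives numchunks=2, chunksize=10, remainder=5,
			# i.e. the queue receives two chunks of 10 and one chunk of 5.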
			
			# the following loops are kind of unrolled - code duplication
			# should make things execute faster. Putting the if statements 
			# into the loop would be less code, but ... slower
			# DEBUG
			# print actual_count, numchunks, chunksize, remainder, task._out_wc.size()
			if self._workers:
				# respect the chunk size, and split the task up if we want 
				# to process too much. This can be defined per task
				queue = self._queue
				if numchunks > 1:
					for i in xrange(numchunks):
						queue.put((task.process, chunksize))
					# END for each chunk to put
				else:
					queue.put((task.process, chunksize))
				# END try efficient looping
				
				if remainder:
					queue.put((task.process, remainder))
				# END handle chunksize
			else:
				# no workers, so we have to do the work ourselves
				if numchunks > 1:
					for i in xrange(numchunks):
						task.process(chunksize)
					# END for each chunk to put
				else:
					task.process(chunksize)
				# END try efficient looping
				
				if remainder:
					task.process(remainder)
				# END handle chunksize
			# END handle serial mode
		# END for each task to process
		
		
	def _post_channel_read(self, task):
		"""Called after we processed a read to cleanup"""
		# check whether we consumed the task, and schedule it for deletion
		# This could have happend after the read returned ( even though the pre-read 
		# checks it as well )
		if task.error() or task.is_done():
			self._consumed_tasks.put(task)
		# END handle consumption
		
		# delete consumed tasks to cleanup
		try:
			while True:
				ct = self._consumed_tasks.get(False)
				self.del_task(ct)
			# END for each task to delete
		except Empty:
			pass
		# END pop queue empty
		
	def _del_task_if_orphaned(self, task):
		"""Check the task, and delete it if it is orphaned"""
		if sys.getrefcount(task._out_wc) < 3:
			self.del_task(task)
	#} END internal
	
	#{ Interface 
	def size(self):
		""":return: amount of workers in the pool"""
		return len(self._workers)
	
	def set_size(self, size=0):
		"""Set the amount of workers to use in this pool. When reducing the size, 
		the call may block as it waits for threads to finish. 
		When reducing the size to zero, this thread will process all remaining 
		items on the queue.
		
		:return: self
		:param size: if 0, the pool will do all work itself in the calling thread, 
			otherwise the work will be distributed among the given number of threads
		
		:note: currently NOT threadsafe !"""
		assert size > -1, "Size cannot be negative"
		
		# either start new threads, or kill existing ones.
		# If we end up with no threads, we process the remaining chunks on the queue
		# ourselves
		cur_count = len(self._workers)
		if cur_count < size:
			# make sure we have a real queue, and can store our consumed tasks properly
			if not isinstance(self._queue, self.TaskQueueCls):
				if self._queue is not None and not self._queue.empty():
					raise AssertionError("Expected empty queue when switching the queue type")
				# END safety check
				self._queue = self.TaskQueueCls()
				self._consumed_tasks = Queue()
			# END init queue
			
			for i in range(size - cur_count):
				worker = self.WorkerCls(self._queue)
				worker.start()
				self._workers.append(worker)
			# END for each new worker to create
		elif cur_count > size:
			del_count = cur_count - size
			for i in range(del_count):
				self._workers[i].stop_and_join()
			# END for each thread to stop
			del(self._workers[:del_count])
		# END handle count
		
		if size == 0:
			while not self._queue.empty():
				try:
					taskmethod, count = self._queue.get(False)
					taskmethod(count)
				except Empty:
					continue
			# END while there are tasks on the queue
			
			# use a serial queue, it's faster
			if not isinstance(self._queue, SyncQueue):
				self._queue = SyncQueue()
			# END handle queue type
			
			if self._consumed_tasks and not self._consumed_tasks.empty(): 
				self._post_channel_read(self._consumed_tasks.get())
			# END assure consumed tasks are empty
			self._consumed_tasks = SyncQueue()
		# END process queue
		return self
		
	def num_tasks(self):
		""":return: amount of tasks"""
		return len(self._tasks.nodes)
		
	def del_task(self, task):
		"""Delete the task
		Additionally we will remove orphaned tasks, which can be identified if their 
		output channel is only held by themselves, so no one will ever consume 
		its items.
		
		:return: self"""
		# now delete our actual node - must set it done so it closes its channels.
		# Otherwise further reads of output tasks will block.
		# Actually they may still block if anyone wants to read all ... without 
		# a timeout
		# keep its input nodes as we check whether they were orphaned
		in_tasks = task.in_nodes
		task.set_done()
		self._taskgraph_lock.acquire()
		try:
			self._taskorder_cache.clear()
			self._tasks.del_node(task)
		finally:
			self._taskgraph_lock.release()
		# END locked deletion
		
		for t in in_tasks:
			self._del_task_if_orphaned(t)
		# END handle orphans recursively
		
		return self
	
	def add_task(self, task):
		"""Add a new task to be processed.
		:return: a read channel to retrieve processed items. If that handle is lost, 
			the task will be considered orphaned and will be deleted on the next 
			occasion."""
		# create a write channel for it
		wc, rc = Channel()
		rc = RPoolChannel(wc, task, self)
		task._out_wc = wc
		
		has_input_channel = isinstance(task, InputChannelTask) 
		if has_input_channel:
			task._pool_ref = weakref.ref(self)
		# END init input channel task
		
		self._taskgraph_lock.acquire()
		try:
			self._taskorder_cache.clear()
			self._tasks.add_node(task)
		finally:
			self._taskgraph_lock.release()
		# END sync task addition 
		
		# If the input channel is one of our read channels, we add the relation
		if has_input_channel:
			ic = task.in_rc
			if isinstance(ic, RPoolChannel) and ic._pool is self:
				self._taskgraph_lock.acquire()
				try:
					self._tasks.add_edge(ic._task, task)
				finally:
					self._taskgraph_lock.release()
				# END handle edge-adding
			# END add task relation
		# END handle input channels for connections
		
		return rc
			
	#} END interface 
	
	
class ThreadPool(Pool):
	"""A pool using threads as worker"""
	WorkerCls = WorkerThread
	LockCls = Lock
	TaskQueueCls = PerfQueue
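

# A minimal end-to-end sketch (illustrative; `some_task` stands for a concrete
# task instance from the `task` module, with min_count/max_chunksize set up):
#
#	pool = ThreadPool(size=2)		# two worker threads; size=0 means serial mode
#	rc = pool.add_task(some_task)	# returns an RPoolChannel handle
#	items = rc.read(20)				# lazily schedules just enough work
#	pool.set_size(0)				# drain the queue and fall back to serial mode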