-rw-r--r--  lib/git/async/util.py        | 77
-rw-r--r--  test/git/async/test_pool.py  | 14
2 files changed, 65 insertions(+), 26 deletions(-)
diff --git a/lib/git/async/util.py b/lib/git/async/util.py
index e3556c05..fb63ccaa 100644
--- a/lib/git/async/util.py
+++ b/lib/git/async/util.py
@@ -2,6 +2,8 @@
from threading import (
Lock,
+ current_thread,
+ _allocate_lock,
_Condition,
_sleep,
_time,
@@ -57,7 +59,7 @@ class SyncQueue(deque):
"""Adapter to allow using a deque like a queue, without locking"""
def get(self, block=True, timeout=None):
try:
- return self.pop()
+ return self.popleft()
except IndexError:
raise Empty
# END raise empty
@@ -67,26 +69,45 @@ class SyncQueue(deque):
put = deque.append
-
-class HSCondition(_Condition):
+
+class HSCondition(object):
"""An attempt to make conditions less blocking, which gains performance
in return by sleeping less"""
+ __slots__ = ("acquire", "release", "_lock", '_waiters')
delay = 0.00002 # reduces wait times, but increases overhead
+ def __init__(self, lock=None):
+ if lock is None:
+ lock = Lock()
+ self._lock = lock
+ self.acquire = lock.acquire
+ self.release = lock.release
+ self._waiters = list()
+
+ def __release(self):
+ return self._lock.release()
+
+ def __acquire(self, block=None):
+ if block is None:
+ self._lock.acquire()
+ else:
+ return self._lock.acquire(block)
+
def wait(self, timeout=None):
- waiter = Lock()
- waiter.acquire()
- self.__dict__['_Condition__waiters'].append(waiter)
- saved_state = self._release_save()
+ waiter = _allocate_lock()
+ waiter.acquire() # get it the first time, no blocking
+ self._waiters.append(waiter)
+
+		# the moment we release our lock, someone else might already resume us
+ self.release()
try: # restore state no matter what (e.g., KeyboardInterrupt)
+			# now we block on the waiter lock, which we already hold - notify() will release it
if timeout is None:
waiter.acquire()
else:
- # Balancing act: We can't afford a pure busy loop, so we
- # have to sleep; but if we sleep the whole timeout time,
- # we'll be unresponsive. The scheme here sleeps very
- # little at first, longer as time goes on, but never longer
- # than 20 times per second (or the timeout time remaining).
+				# Balancing act: a pure busy loop is too expensive because of the
+				# GIL, so we have to sleep. We sleep only tiny amounts at a time,
+				# though, to stay as responsive as possible.
endtime = _time() + timeout
delay = self.delay
acquire = waiter.acquire
@@ -104,35 +125,49 @@ class HSCondition(_Condition):
# END endless loop
if not gotit:
try:
- self.__dict__['_Condition__waiters'].remove(waiter)
+ self._waiters.remove(waiter)
except ValueError:
pass
# END didn't ever get it
finally:
- self._acquire_restore(saved_state)
+ # reacquire the lock
+ self.acquire()
def notify(self, n=1):
- __waiters = self.__dict__['_Condition__waiters']
- if not __waiters:
+ if not self._waiters:
return
+ waiters = self._waiters
if n == 1:
- __waiters[0].release()
+ waiters[0].release()
try:
- __waiters.pop(0)
+ waiters.pop(0)
except IndexError:
pass
else:
- waiters = __waiters[:n]
- for waiter in waiters:
+ for waiter in waiters[:n]:
waiter.release()
try:
- __waiters.remove(waiter)
+ waiters.remove(waiter)
except ValueError:
pass
# END handle n = 1 case faster
+ def notify_all(self):
+ self.notify(len(self._waiters))
+
+
class AsyncQueue(Queue):
"""A queue using different condition objects to gain multithreading performance"""
+ def __init__(self, maxsize=0):
+ Queue.__init__(self, maxsize)
+
+ self.not_empty = HSCondition(self.mutex)
+ self.not_full = HSCondition(self.mutex)
+ self.all_tasks_done = HSCondition(self.mutex)
+
+
+class _AsyncQueue(Queue):
+ """A queue using different condition objects to gain multithreading performance"""
__slots__ = ('mutex', 'not_empty', 'queue')
def __init__(self, maxsize=0):
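
Aside (not part of the patch): the AsyncQueue hunk above relies on the fact that
the stdlib Queue builds its three condition variables around a single mutex, so a
subclass may swap them for a cheaper implementation as long as all three keep
sharing that mutex. A minimal sketch of the pattern, using threading.Condition as
a stand-in for HSCondition and the Python 2 Queue module this codebase targets:

    # Sketch only: Condition stands in for HSCondition here.
    from Queue import Queue            # Python 2; 'from queue import Queue' on Python 3
    from threading import Condition

    class CustomConditionQueue(Queue):
        def __init__(self, maxsize=0):
            Queue.__init__(self, maxsize)
            # all three conditions must share the queue's own mutex,
            # exactly as AsyncQueue does with HSCondition(self.mutex)
            self.not_empty = Condition(self.mutex)
            self.not_full = Condition(self.mutex)
            self.all_tasks_done = Condition(self.mutex)

    q = CustomConditionQueue()
    q.put(1)
    assert q.get() == 1
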
diff --git a/test/git/async/test_pool.py b/test/git/async/test_pool.py
index 0d779f39..4c20a9b2 100644
--- a/test/git/async/test_pool.py
+++ b/test/git/async/test_pool.py
@@ -136,8 +136,9 @@ class TestThreadPool(TestBase):
# rest - it has ni/2 - 2 on the queue, and pulls ni-2
# It wants too much, so the task realizes it's done. The task
# doesn't care about the items in its output channel
- items = rc.read(ni-2)
- assert len(items) == ni - 2
+ nri = ni-2
+ items = rc.read(nri)
+ assert len(items) == nri
assert p.num_tasks() == null_tasks
task._assert(2, ni) # two chunks, ni calls
@@ -152,11 +153,14 @@ class TestThreadPool(TestBase):
# must read a specific item count
# count is still at ni / 2 - here we want more than that
# 2 steps with n / 4 items, + 1 step with n/4 items to get + 2
- assert len(rc.read(ni / 2 + 2)) == ni / 2 + 2
+ nri = ni / 2 + 2
+ items = rc.read(nri)
+ assert len(items) == nri
# have n / 4 - 2 items on queue, want n / 4 in first chunk, cause 1 processing
# ( 4 in total ). Still want n / 4 - 2 in second chunk, causing another processing
- items = rc.read(ni / 2 - 2)
- assert len(items) == ni / 2 - 2
+ nri = ni / 2 - 2
+ items = rc.read(nri)
+ assert len(items) == nri
task._assert( 5, ni)
assert p.num_tasks() == null_tasks # depleted
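
For reference, the timed wait that HSCondition.wait performs when a timeout is
given follows roughly the scheme below. This is my own reconstruction (the exact
loop body is elided from the hunk above); the doubling delay and the 0.05s cap
are assumptions mirroring the stdlib Condition implementation this class replaces.

    import time
    from threading import Lock

    def timed_wait(waiter, timeout, delay=0.00002, max_delay=0.05):
        # Poll the waiter lock without blocking and sleep in small, growing
        # increments: responsive, yet no GIL-burning busy loop.
        endtime = time.time() + timeout
        while True:
            if waiter.acquire(False):      # non-blocking probe
                return True                # a notifier released the waiter lock
            remaining = endtime - time.time()
            if remaining <= 0:
                return False               # timed out without being notified
            delay = min(delay * 2, remaining, max_delay)
            time.sleep(delay)

    # usage: the waiter lock is pre-acquired; with nobody notifying, we time out
    waiter = Lock()
    waiter.acquire()
    assert timed_wait(waiter, 0.01) is False
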