import asyncio
import socket
import types
from unittest.mock import patch

import pytest
import redis
from redis.asyncio import Redis
from redis.asyncio.connection import (
BaseParser,
Connection,
HiredisParser,
PythonParser,
UnixDomainSocketConnection,
)
from redis.asyncio.retry import Retry
from redis.backoff import NoBackoff
from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError
from redis.utils import HIREDIS_AVAILABLE
from tests.conftest import skip_if_server_version_lt

from .compat import mock
from .mocks import MockStream


@pytest.mark.onlynoncluster
async def test_invalid_response(create_redis):
r = await create_redis(single_connection_client=True)
raw = b"x"
fake_stream = MockStream(raw + b"\r\n")
parser: BaseParser = r.connection._parser
with mock.patch.object(parser, "_stream", fake_stream):
with pytest.raises(InvalidResponse) as cm:
await parser.read_response()
if isinstance(parser, PythonParser):
assert str(cm.value) == f"Protocol Error: {raw!r}"
else:
assert (
str(cm.value) == f'Protocol error, got "{raw.decode()}" as reply type byte'
)
await r.connection.disconnect()
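

# A minimal sketch of the kind of fake stream used above; this is a
# hypothetical stand-in for illustration only, not the MockStream helper
# imported from .mocks.  It feeds canned bytes to the parser through the same
# coroutine methods the parsers call on a real asyncio stream.
class _FakeStreamSketch:
    def __init__(self, data: bytes):
        self._data = data

    async def readline(self) -> bytes:
        # PythonParser reads CRLF-terminated lines.
        line, _, self._data = self._data.partition(b"\r\n")
        return line + b"\r\n"

    async def readexactly(self, n: int) -> bytes:
        # PythonParser reads fixed-length bulk payloads.
        out, self._data = self._data[:n], self._data[n:]
        return out

    async def read(self, n: int = -1) -> bytes:
        # HiredisParser reads raw chunks.
        if n < 0:
            n = len(self._data)
        out, self._data = self._data[:n], self._data[n:]
        return out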


@pytest.mark.onlynoncluster
async def test_asynckills():
for b in [True, False]:
r = Redis(single_connection_client=b)
await r.set("foo", "foo")
await r.set("bar", "bar")
t = asyncio.create_task(r.get("foo"))
await asyncio.sleep(1)
t.cancel()
try:
await t
except asyncio.CancelledError:
pytest.fail("connection left open with unread response")
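        # If the cancelled GET had left its reply unread on the socket, the
        # next command would consume that stale reply and the responses below
        # would be shifted by one.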
assert await r.get("bar") == b"bar"
assert await r.ping()
assert await r.get("foo") == b"foo"


@pytest.mark.onlynoncluster
async def test_single_connection():
"""Test that concurrent requests on a single client are synchronised."""
r = Redis(single_connection_client=True)
init_call_count = 0
command_call_count = 0
in_use = False
class Retry_:
async def call_with_retry(self, _, __):
# If we remove the single-client lock, this error gets raised as two
# coroutines will be vying for the `in_use` flag due to the two
# asymmetric sleep calls
nonlocal command_call_count
nonlocal in_use
if in_use is True:
raise ValueError("Commands should be executed one at a time.")
in_use = True
await asyncio.sleep(0.01)
command_call_count += 1
await asyncio.sleep(0.03)
in_use = False
return "foo"
mock_conn = mock.MagicMock()
mock_conn.retry = Retry_()
async def get_conn(_):
# Validate only one client is created in single-client mode when
# concurrent requests are made
nonlocal init_call_count
await asyncio.sleep(0.01)
init_call_count += 1
return mock_conn
with mock.patch.object(r.connection_pool, "get_connection", get_conn):
with mock.patch.object(r.connection_pool, "release"):
await asyncio.gather(r.set("a", "b"), r.set("c", "d"))
assert init_call_count == 1
assert command_call_count == 2
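

# For context: with single_connection_client=True, commands are serialised by
# holding a lock around the one shared connection, roughly as sketched below.
# This is an illustrative stand-in only, not redis-py's actual implementation.
class _SingleConnectionSketch:
    def __init__(self, get_connection):
        # `get_connection` is any coroutine function returning a connection.
        self._get_connection = get_connection
        self._lock = asyncio.Lock()
        self._conn = None

    async def execute(self, send_command):
        # Only one coroutine may create or use the single connection at a
        # time, which is what the init/command counters above rely on.
        async with self._lock:
            if self._conn is None:
                self._conn = await self._get_connection()
            return await send_command(self._conn)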


@skip_if_server_version_lt("4.0.0")
@pytest.mark.redismod
@pytest.mark.onlynoncluster
async def test_loading_external_modules(modclient):
def inner():
pass
modclient.load_external_module("myfuncname", inner)
assert getattr(modclient, "myfuncname") == inner
assert isinstance(getattr(modclient, "myfuncname"), types.FunctionType)
# and call it
from redis.commands import RedisModuleCommands
j = RedisModuleCommands.json
modclient.load_external_module("sometestfuncname", j)
# d = {'hello': 'world!'}
# mod = j(modclient)
# mod.set("fookey", ".", d)
# assert mod.get('fookey') == d


async def test_socket_param_regression(r):
"""A regression test for issue #1060"""
conn = UnixDomainSocketConnection()
_ = await conn.disconnect() is True


async def test_can_run_concurrent_commands(r):
if getattr(r, "connection", None) is not None:
# Concurrent commands are only supported on pooled or cluster connections
# since there is no synchronization on a single connection.
pytest.skip("pool only")
assert await r.ping() is True
assert all(await asyncio.gather(*(r.ping() for _ in range(10))))


async def test_connect_retry_on_timeout_error():
"""Test that the _connect function is retried in case of a timeout"""
conn = Connection(retry_on_timeout=True, retry=Retry(NoBackoff(), 3))
origin_connect = conn._connect
conn._connect = mock.AsyncMock()
async def mock_connect():
# connect only on the last retry
if conn._connect.call_count <= 2:
raise socket.timeout
else:
return await origin_connect()
conn._connect.side_effect = mock_connect
await conn.connect()
assert conn._connect.call_count == 3
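

# A hedged sketch of driving Retry/NoBackoff directly, assuming
# Retry.call_with_retry(do, fail) keeps the signature Connection.connect()
# relies on.  Illustrative only; nothing in this module calls it.
async def _retry_usage_sketch():
    retry = Retry(NoBackoff(), 3)
    attempts = 0

    async def do():
        nonlocal attempts
        attempts += 1
        if attempts < 3:
            # TimeoutError is assumed to be among Retry's supported errors.
            raise TimeoutError("still failing")
        return "ok"

    async def fail(error):
        # invoked once per failed attempt, before the next retry
        pass

    return await retry.call_with_retry(do, fail)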


async def test_connect_without_retry_on_os_error():
"""Test that the _connect function is not being retried in case of a OSError"""
with patch.object(Connection, "_connect") as _connect:
_connect.side_effect = OSError("")
conn = Connection(retry_on_timeout=True, retry=Retry(NoBackoff(), 2))
with pytest.raises(ConnectionError):
await conn.connect()
assert _connect.call_count == 1


async def test_connect_timeout_error_without_retry():
"""Test that the _connect function is not being retried if retry_on_timeout is
set to False"""
conn = Connection(retry_on_timeout=False)
conn._connect = mock.AsyncMock()
conn._connect.side_effect = socket.timeout
with pytest.raises(TimeoutError) as e:
await conn.connect()
assert conn._connect.call_count == 1
assert str(e.value) == "Timeout connecting to server"


@pytest.mark.onlynoncluster
async def test_connection_parse_response_resume(r: redis.Redis):
"""
This test verifies that the Connection parser,
be that PythonParser or HiredisParser,
can be interrupted at IO time and then resume parsing.
"""
conn = Connection(**r.connection_pool.connection_kwargs)
await conn.connect()
message = (
b"*3\r\n$7\r\nmessage\r\n$8\r\nchannel1\r\n"
b"$25\r\nhi\r\nthere\r\n+how\r\nare\r\nyou\r\n"
)
conn._parser._stream = MockStream(message, interrupt_every=2)
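    # The payload above is a RESP array of three bulk strings ("message",
    # "channel1", and a 25-byte body) whose last element deliberately embeds
    # CRLF sequences and a "+" so the parser cannot rely on line boundaries.
    # interrupt_every=2 makes the mock stream periodically raise
    # MockStream.TestError, so parsing is interrupted at IO time and must
    # resume where it left off on the next read_response() attempt.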
for i in range(100):
try:
response = await conn.read_response()
break
except MockStream.TestError:
pass
else:
pytest.fail("didn't receive a response")
assert response
assert i > 0


@pytest.mark.onlynoncluster
@pytest.mark.parametrize(
"parser_class", [PythonParser, HiredisParser], ids=["PythonParser", "HiredisParser"]
)
async def test_connection_disconnect_race(parser_class):
"""
This test reproduces the case in issue #2349
where a connection is closed while the parser is reading to feed the
internal buffer.The stream `read()` will succeed, but when it returns,
another task has already called `disconnect()` and is waiting for
close to finish. When we attempts to feed the buffer, we will fail
since the buffer is no longer there.
This test verifies that a read in progress can finish even
if the `disconnect()` method is called.
"""
if parser_class == HiredisParser and not HIREDIS_AVAILABLE:
pytest.skip("Hiredis not available")
    conn = Connection(parser_class=parser_class)
cond = asyncio.Condition()
# 0 == initial
# 1 == reader is reading
# 2 == closer has closed and is waiting for close to finish
state = 0
    # Mock read function, which waits for a close to happen before returning.
    # It can be invoked either as two `read()` calls (HiredisParser)
    # or as a `readline()` followed by `readexactly()` (PythonParser).
chunks = [b"$13\r\n", b"Hello, World!\r\n"]
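    # Together the chunks form a single RESP bulk string: the "$13\r\n" header
    # announcing a 13-byte payload, then "Hello, World!" plus its CRLF
    # terminator, which is why the read below yields b"Hello, World!".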
async def read(_=None):
nonlocal state
async with cond:
if state == 0:
state = 1 # we are reading
cond.notify()
                # wait until the closing task has done its work
await cond.wait_for(lambda: state == 2)
return chunks.pop(0)
# function closes the connection while reader is still blocked reading
async def do_close():
nonlocal state
async with cond:
await cond.wait_for(lambda: state == 1)
state = 2
cond.notify()
await conn.disconnect()
async def do_read():
return await conn.read_response()
reader = mock.AsyncMock()
writer = mock.AsyncMock()
writer.transport = mock.Mock()
writer.transport.get_extra_info.side_effect = None
# for HiredisParser
reader.read.side_effect = read
# for PythonParser
reader.readline.side_effect = read
reader.readexactly.side_effect = read
async def open_connection(*args, **kwargs):
return reader, writer
with patch.object(asyncio, "open_connection", open_connection):
await conn.connect()
vals = await asyncio.gather(do_read(), do_close())
assert vals == [b"Hello, World!", None]