diff --git a/benchmarks/python/all_async_100 b/benchmarks/python/all_async_100
new file mode 100644
index 0000000000..3a618d528a
--- /dev/null
+++ b/benchmarks/python/all_async_100
@@ -0,0 +1 @@
+[{"client": "redispy", "loop": "asyncio", "num_of_tasks": 10, "data_size": 100, "tps": 13004, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.717, "get_existing_p90_latency": 0.789, "get_existing_p99_latency": 0.65, "get_existing_average_latency": 0.746, "get_existing_std_dev": 2.788, "get_non_existing_p50_latency": 0.708, "get_non_existing_p90_latency": 0.779, "get_non_existing_p99_latency": 0.642, "get_non_existing_average_latency": 0.756, "get_non_existing_std_dev": 3.631, "set_p50_latency": 0.72, "set_p90_latency": 0.792, "set_p99_latency": 0.65, "set_average_latency": 0.748, "set_std_dev": 2.997}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 10, "data_size": 100, "tps": 14918, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.605, "get_existing_p90_latency": 0.893, "get_existing_p99_latency": 0.535, "get_existing_average_latency": 0.67, "get_existing_std_dev": 0.288, "get_non_existing_p50_latency": 0.599, "get_non_existing_p90_latency": 0.87, "get_non_existing_p99_latency": 0.531, "get_non_existing_average_latency": 0.657, "get_non_existing_std_dev": 0.267, "set_p50_latency": 0.602, "set_p90_latency": 0.882, "set_p99_latency": 0.536, "set_average_latency": 0.663, "set_std_dev": 0.262}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 10, "data_size": 100, "tps": 14151, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.675, "get_existing_p90_latency": 0.923, "get_existing_p99_latency": 0.491, "get_existing_average_latency": 0.706, "get_existing_std_dev": 0.457, "get_non_existing_p50_latency": 0.663, "get_non_existing_p90_latency": 0.9, "get_non_existing_p99_latency": 0.475, "get_non_existing_average_latency": 0.692, "get_non_existing_std_dev": 0.403, "set_p50_latency": 0.671, "set_p90_latency": 0.9126, "set_p99_latency": 0.483, "set_average_latency": 0.699, "set_std_dev": 0.375}, {"client": "redispy", "loop": "asyncio", "num_of_tasks": 100, "data_size": 100, "tps": 8621, 
"client_count": 1, "is_cluster": true, "get_existing_p50_latency": 6.783, "get_existing_p90_latency": 6.96, "get_existing_p99_latency": 6.656, "get_existing_average_latency": 9.421, "get_existing_std_dev": 91.222, "get_non_existing_p50_latency": 6.775, "get_non_existing_p90_latency": 6.9515, "get_non_existing_p99_latency": 6.647, "get_non_existing_average_latency": 8.975, "get_non_existing_std_dev": 87.677, "set_p50_latency": 6.783, "set_p90_latency": 6.961, "set_p99_latency": 6.656, "set_average_latency": 9.436, "set_std_dev": 92.412}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 100, "data_size": 100, "tps": 23004, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 4.284, "get_existing_p90_latency": 4.644, "get_existing_p99_latency": 3.97, "get_existing_average_latency": 4.343, "get_existing_std_dev": 0.557, "get_non_existing_p50_latency": 4.28, "get_non_existing_p90_latency": 4.622, "get_non_existing_p99_latency": 3.968, "get_non_existing_average_latency": 4.332, "get_non_existing_std_dev": 0.547, "set_p50_latency": 4.286, "set_p90_latency": 4.646, "set_p99_latency": 3.972, "set_average_latency": 4.343, "set_std_dev": 0.541}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 100, "data_size": 100, "tps": 13771, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 7.294, "get_existing_p90_latency": 7.871, "get_existing_p99_latency": 6.3654, "get_existing_average_latency": 7.246, "get_existing_std_dev": 2.574, "get_non_existing_p50_latency": 7.297, "get_non_existing_p90_latency": 7.88, "get_non_existing_p99_latency": 6.3863, "get_non_existing_average_latency": 7.266, "get_non_existing_std_dev": 2.668, "set_p50_latency": 7.286, "set_p90_latency": 7.867, "set_p99_latency": 6.362, "set_average_latency": 7.257, "set_std_dev": 2.879}, {"client": "redispy", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 100, "tps": 1762, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 82.571, 
"get_existing_p90_latency": 83.368, "get_existing_p99_latency": 81.668, "get_existing_average_latency": 334.358, "get_existing_std_dev": 2854.132, "get_non_existing_p50_latency": 82.563, "get_non_existing_p90_latency": 83.337, "get_non_existing_p99_latency": 81.652, "get_non_existing_average_latency": 323.343, "get_non_existing_std_dev": 2750.045, "set_p50_latency": 82.584, "set_p90_latency": 83.384, "set_p99_latency": 81.677, "set_average_latency": 359.625, "set_std_dev": 2968.437}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 100, "tps": 17204, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 59.274, "get_existing_p90_latency": 60.833, "get_existing_p99_latency": 41.586, "get_existing_average_latency": 57.663, "get_existing_std_dev": 10.129, "get_non_existing_p50_latency": 59.271, "get_non_existing_p90_latency": 60.9232, "get_non_existing_p99_latency": 41.578, "get_non_existing_average_latency": 57.8, "get_non_existing_std_dev": 10.301, "set_p50_latency": 59.293, "set_p90_latency": 60.898, "set_p99_latency": 41.6174, "set_average_latency": 57.83, "set_std_dev": 10.192}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 100, "tps": 12904, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 77.082, "get_existing_p90_latency": 79.81, "get_existing_p99_latency": 71.302, "get_existing_average_latency": 76.808, "get_existing_std_dev": 6.776, "get_non_existing_p50_latency": 77.094, "get_non_existing_p90_latency": 79.792, "get_non_existing_p99_latency": 71.5801, "get_non_existing_average_latency": 76.853, "get_non_existing_std_dev": 6.865, "set_p50_latency": 77.074, "set_p90_latency": 79.792, "set_p99_latency": 71.556, "set_average_latency": 76.837, "set_std_dev": 6.789}]
\ No newline at end of file
diff --git a/benchmarks/python/all_async_4000 b/benchmarks/python/all_async_4000
new file mode 100644
index 0000000000..1ce063b6b8
--- /dev/null
+++ b/benchmarks/python/all_async_4000
@@ -0,0 +1 @@
+[{"client": "redispy", "loop": "asyncio", "num_of_tasks": 10, "data_size": 4000, "tps": 12484, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.745, "get_existing_p90_latency": 0.831, "get_existing_p99_latency": 0.68, "get_existing_average_latency": 0.772, "get_existing_std_dev": 2.701, "get_non_existing_p50_latency": 0.734, "get_non_existing_p90_latency": 0.821, "get_non_existing_p99_latency": 0.672, "get_non_existing_average_latency": 0.807, "get_non_existing_std_dev": 4.013, "set_p50_latency": 0.764, "set_p90_latency": 0.84, "set_p99_latency": 0.687, "set_average_latency": 0.785, "set_std_dev": 2.781}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 10, "data_size": 4000, "tps": 15567, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.614, "get_existing_p90_latency": 0.757, "get_existing_p99_latency": 0.553, "get_existing_average_latency": 0.638, "get_existing_std_dev": 0.092, "get_non_existing_p50_latency": 0.608, "get_non_existing_p90_latency": 0.749, "get_non_existing_p99_latency": 0.548, "get_non_existing_average_latency": 0.631, "get_non_existing_std_dev": 0.091, "set_p50_latency": 0.625, "set_p90_latency": 0.755, "set_p99_latency": 0.565, "set_average_latency": 0.646, "set_std_dev": 0.085}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 10, "data_size": 4000, "tps": 13752, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.703, "get_existing_p90_latency": 0.95, "get_existing_p99_latency": 0.511, "get_existing_average_latency": 0.727, "get_existing_std_dev": 0.374, "get_non_existing_p50_latency": 0.69, "get_non_existing_p90_latency": 0.919, "get_non_existing_p99_latency": 0.496, "get_non_existing_average_latency": 0.708, "get_non_existing_std_dev": 0.303, "set_p50_latency": 0.702, "set_p90_latency": 0.923, "set_p99_latency": 0.526, "set_average_latency": 0.723, "set_std_dev": 0.355}, {"client": "redispy", "loop": "asyncio", "num_of_tasks": 100, "data_size": 4000, "tps": 
8570, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 6.837, "get_existing_p90_latency": 6.954, "get_existing_p99_latency": 6.7189, "get_existing_average_latency": 9.685, "get_existing_std_dev": 95.957, "get_non_existing_p50_latency": 6.829, "get_non_existing_p90_latency": 6.948, "get_non_existing_p99_latency": 6.713, "get_non_existing_average_latency": 9.875, "get_non_existing_std_dev": 104.295, "set_p50_latency": 6.844, "set_p90_latency": 6.962, "set_p99_latency": 6.728, "set_average_latency": 8.16, "set_std_dev": 61.441}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 100, "data_size": 4000, "tps": 22698, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 4.308, "get_existing_p90_latency": 4.572, "get_existing_p99_latency": 4.195, "get_existing_average_latency": 4.397, "get_existing_std_dev": 0.334, "get_non_existing_p50_latency": 4.309, "get_non_existing_p90_latency": 4.5769, "get_non_existing_p99_latency": 4.194, "get_non_existing_average_latency": 4.397, "get_non_existing_std_dev": 0.326, "set_p50_latency": 4.321, "set_p90_latency": 4.586, "set_p99_latency": 4.203, "set_average_latency": 4.41, "set_std_dev": 0.344}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 100, "data_size": 4000, "tps": 13061, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 7.6885, "get_existing_p90_latency": 8.205, "get_existing_p99_latency": 6.9219, "get_existing_average_latency": 7.655, "get_existing_std_dev": 2.692, "get_non_existing_p50_latency": 7.686, "get_non_existing_p90_latency": 8.183, "get_non_existing_p99_latency": 6.877, "get_non_existing_average_latency": 7.597, "get_non_existing_std_dev": 2.041, "set_p50_latency": 7.696, "set_p90_latency": 8.2135, "set_p99_latency": 6.8742, "set_average_latency": 7.653, "set_std_dev": 2.668}, {"client": "redispy", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 4000, "tps": 1772, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 
83.394, "get_existing_p90_latency": 84.378, "get_existing_p99_latency": 82.555, "get_existing_average_latency": 338.39, "get_existing_std_dev": 2845.624, "get_non_existing_p50_latency": 83.39, "get_non_existing_p90_latency": 84.3097, "get_non_existing_p99_latency": 82.554, "get_non_existing_average_latency": 329.97, "get_non_existing_std_dev": 2793.609, "set_p50_latency": 83.421, "set_p90_latency": 84.4, "set_p99_latency": 82.571, "set_average_latency": 335.225, "set_std_dev": 2849.474}, {"client": "glide_socket", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 4000, "tps": 18822, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 53.79, "get_existing_p90_latency": 58.0806, "get_existing_p99_latency": 37.1445, "get_existing_average_latency": 52.893, "get_existing_std_dev": 9.769, "get_non_existing_p50_latency": 53.756, "get_non_existing_p90_latency": 57.9729, "get_non_existing_p99_latency": 37.093, "get_non_existing_average_latency": 52.78, "get_non_existing_std_dev": 9.784, "set_p50_latency": 53.8145, "set_p90_latency": 58.0209, "set_p99_latency": 37.1686, "set_average_latency": 52.892, "set_std_dev": 9.779}, {"client": "glide_ffi", "loop": "asyncio", "num_of_tasks": 1000, "data_size": 4000, "tps": 12842, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 77.154, "get_existing_p90_latency": 81.394, "get_existing_p99_latency": 71.292, "get_existing_average_latency": 77.211, "get_existing_std_dev": 7.371, "get_non_existing_p50_latency": 77.11, "get_non_existing_p90_latency": 81.3546, "get_non_existing_p99_latency": 71.1546, "get_non_existing_average_latency": 77.205, "get_non_existing_std_dev": 7.54, "set_p50_latency": 77.218, "set_p90_latency": 81.426, "set_p99_latency": 71.1336, "set_average_latency": 77.223, "set_std_dev": 7.245}]
\ No newline at end of file
diff --git a/benchmarks/python/glide_sync_100 b/benchmarks/python/glide_sync_100
new file mode 100644
index 0000000000..d4762d2b9f
--- /dev/null
+++ b/benchmarks/python/glide_sync_100
@@ -0,0 +1 @@
+[{"client": "glide_sync_ffi", "data_size": 100, "tps": 4234, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.215, "get_existing_p90_latency": 0.335, "get_existing_p99_latency": 0.189, "get_existing_average_latency": 0.236, "get_existing_std_dev": 0.077, "get_non_existing_p50_latency": 0.195, "get_non_existing_p90_latency": 0.327, "get_non_existing_p99_latency": 0.185, "get_non_existing_average_latency": 0.221, "get_non_existing_std_dev": 0.069, "set_p50_latency": 0.207, "set_p90_latency": 0.335, "set_p99_latency": 0.196, "set_average_latency": 0.232, "set_std_dev": 0.07}, {"client": "glide_sync_uds", "data_size": 100, "tps": 3470, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.259, "get_existing_p90_latency": 0.439, "get_existing_p99_latency": 0.233, "get_existing_average_latency": 0.287, "get_existing_std_dev": 0.088, "get_non_existing_p50_latency": 0.24, "get_non_existing_p90_latency": 0.43, "get_non_existing_p99_latency": 0.23, "get_non_existing_average_latency": 0.274, "get_non_existing_std_dev": 0.08, "set_p50_latency": 0.25, "set_p90_latency": 0.436, "set_p99_latency": 0.239, "set_average_latency": 0.284, "set_std_dev": 0.095}]
\ No newline at end of file
diff --git a/benchmarks/python/glide_sync_100_ffi b/benchmarks/python/glide_sync_100_ffi
new file mode 100644
index 0000000000..6237cc1b23
--- /dev/null
+++ b/benchmarks/python/glide_sync_100_ffi
@@ -0,0 +1 @@
+[{"client": "glide_sync_ffi", "data_size": 100, "num_of_threads": 10, "tps": 22660, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.419, "get_existing_p90_latency": 0.5167, "get_existing_p99_latency": 0.368, "get_existing_average_latency": 0.434, "get_existing_std_dev": 0.084, "get_non_existing_p50_latency": 0.413, "get_non_existing_p90_latency": 0.512, "get_non_existing_p99_latency": 0.36, "get_non_existing_average_latency": 0.429, "get_non_existing_std_dev": 0.094, "set_p50_latency": 0.419, "set_p90_latency": 0.513, "set_p99_latency": 0.369, "set_average_latency": 0.433, "set_std_dev": 0.076}]
\ No newline at end of file
diff --git a/benchmarks/python/glide_sync_100_redispy b/benchmarks/python/glide_sync_100_redispy
new file mode 100644
index 0000000000..61bafec577
--- /dev/null
+++ b/benchmarks/python/glide_sync_100_redispy
@@ -0,0 +1 @@
+[{"client": "redispy", "data_size": 100, "num_of_threads": 10, "tps": 7240, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 1.272, "get_existing_p90_latency": 2.35, "get_existing_p99_latency": 0.664, "get_existing_average_latency": 1.479, "get_existing_std_dev": 2.1, "get_non_existing_p50_latency": 1.049, "get_non_existing_p90_latency": 1.746, "get_non_existing_p99_latency": 0.589, "get_non_existing_average_latency": 1.149, "get_non_existing_std_dev": 2.043, "set_p50_latency": 1.055, "set_p90_latency": 1.764, "set_p99_latency": 0.598, "set_average_latency": 1.211, "set_std_dev": 4.248}]
\ No newline at end of file
diff --git a/benchmarks/python/glide_sync_benchmark_100 b/benchmarks/python/glide_sync_benchmark_100
new file mode 100644
index 0000000000..89fc82efd6
--- /dev/null
+++ b/benchmarks/python/glide_sync_benchmark_100
@@ -0,0 +1 @@
+[{"client": "redispy", "data_size": 100, "tps": 4222, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.215, "get_existing_p90_latency": 0.331, "get_existing_p99_latency": 0.193, "get_existing_average_latency": 0.234, "get_existing_std_dev": 0.068, "get_non_existing_p50_latency": 0.198, "get_non_existing_p90_latency": 0.325, "get_non_existing_p99_latency": 0.191, "get_non_existing_average_latency": 0.222, "get_non_existing_std_dev": 0.065, "set_p50_latency": 0.211, "set_p90_latency": 0.334, "set_p99_latency": 0.203, "set_average_latency": 0.235, "set_std_dev": 0.064}, {"client": "glide_sync_ffi", "data_size": 100, "tps": 3902, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.241, "get_existing_p90_latency": 0.35, "get_existing_p99_latency": 0.202, "get_existing_average_latency": 0.257, "get_existing_std_dev": 0.081, "get_non_existing_p50_latency": 0.207, "get_non_existing_p90_latency": 0.331, "get_non_existing_p99_latency": 0.198, "get_non_existing_average_latency": 0.238, "get_non_existing_std_dev": 0.082, "set_p50_latency": 0.219, "set_p90_latency": 0.339, "set_p99_latency": 0.209, "set_average_latency": 0.249, "set_std_dev": 0.078}, {"client": "glide_sync_uds", "data_size": 100, "tps": 3736, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.247, "get_existing_p90_latency": 0.351, "get_existing_p99_latency": 0.224, "get_existing_average_latency": 0.265, "get_existing_std_dev": 0.069, "get_non_existing_p50_latency": 0.229, "get_non_existing_p90_latency": 0.333, "get_non_existing_p99_latency": 0.22, "get_non_existing_average_latency": 0.254, "get_non_existing_std_dev": 0.076, "set_p50_latency": 0.239, "set_p90_latency": 0.353, "set_p99_latency": 0.23, "set_average_latency": 0.266, "set_std_dev": 0.109}]
\ No newline at end of file
diff --git a/benchmarks/python/glide_sync_benchmark_16000 b/benchmarks/python/glide_sync_benchmark_16000
new file mode 100644
index 0000000000..00f4ebfdf7
--- /dev/null
+++ b/benchmarks/python/glide_sync_benchmark_16000
@@ -0,0 +1 @@
+language,client,is_cluster,num_of_tasks,data_size,client_count,tps,get_non_existing_p50_latency,get_non_existing_p90_latency,get_non_existing_p99_latency,get_non_existing_average_latency,get_non_existing_std_dev,get_existing_p50_latency,get_existing_p90_latency,get_existing_p99_latency,get_existing_average_latency,get_existing_std_dev,set_p50_latency,set_p90_latency,set_p99_latency,set_average_latency,set_std_dev
diff --git a/benchmarks/python/glide_sync_benchmark_4000 b/benchmarks/python/glide_sync_benchmark_4000
new file mode 100644
index 0000000000..0f6bbba0ca
--- /dev/null
+++ b/benchmarks/python/glide_sync_benchmark_4000
@@ -0,0 +1 @@
+[{"client": "redispy", "data_size": 4000, "tps": 4423, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.212, "get_existing_p90_latency": 0.256, "get_existing_p99_latency": 0.19, "get_existing_average_latency": 0.218, "get_existing_std_dev": 0.035, "get_non_existing_p50_latency": 0.195, "get_non_existing_p90_latency": 0.243, "get_non_existing_p99_latency": 0.188, "get_non_existing_average_latency": 0.207, "get_non_existing_std_dev": 0.031, "set_p50_latency": 0.233, "set_p90_latency": 0.275, "set_p99_latency": 0.224, "set_average_latency": 0.244, "set_std_dev": 0.033}, {"client": "glide_sync_ffi", "data_size": 4000, "tps": 4069, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.234, "get_existing_p90_latency": 0.316, "get_existing_p99_latency": 0.2, "get_existing_average_latency": 0.241, "get_existing_std_dev": 0.047, "get_non_existing_p50_latency": 0.204, "get_non_existing_p90_latency": 0.298, "get_non_existing_p99_latency": 0.196, "get_non_existing_average_latency": 0.22, "get_non_existing_std_dev": 0.046, "set_p50_latency": 0.244, "set_p90_latency": 0.339, "set_p99_latency": 0.232, "set_average_latency": 0.262, "set_std_dev": 0.046}, {"client": "glide_sync_uds", "data_size": 4000, "tps": 3881, "client_count": 1, "is_cluster": true, "get_existing_p50_latency": 0.245, "get_existing_p90_latency": 0.28, "get_existing_p99_latency": 0.221, "get_existing_average_latency": 0.25, "get_existing_std_dev": 0.037, "get_non_existing_p50_latency": 0.227, "get_non_existing_p90_latency": 0.261, "get_non_existing_p99_latency": 0.218, "get_non_existing_average_latency": 0.237, "get_non_existing_std_dev": 0.033, "set_p50_latency": 0.268, "set_p90_latency": 0.309, "set_p99_latency": 0.256, "set_average_latency": 0.281, "set_std_dev": 0.044}]
\ No newline at end of file
diff --git a/benchmarks/python/multithreaded_sync_100 b/benchmarks/python/multithreaded_sync_100
new file mode 100644
index 0000000000..0dbf074a80
--- /dev/null
+++ b/benchmarks/python/multithreaded_sync_100
@@ -0,0 +1,7 @@
+language,client,is_cluster,data_size,num_of_threads,client_count,tps,get_non_existing_p50_latency,get_non_existing_p90_latency,get_non_existing_p99_latency,get_non_existing_average_latency,get_non_existing_std_dev,get_existing_p50_latency,get_existing_p90_latency,get_existing_p99_latency,get_existing_average_latency,get_existing_std_dev,set_p50_latency,set_p90_latency,set_p99_latency,set_average_latency,set_std_dev
+python,redispy,True,100,1,1,4114,0.198,0.326,0.188,0.224,0.075,0.227,0.333,0.19,0.242,0.073,0.211,0.335,0.2,0.236,0.071
+python,glide_sync_ffi,True,100,1,1,4499,0.189,0.251,0.183,0.205,0.065,0.21,0.293,0.187,0.222,0.069,0.201,0.261,0.194,0.217,0.067
+python,redispy,True,100,10,1,7012,1.028,1.716,0.577,1.167,4.2,1.346,2.47,0.683,1.565,2.559,1.036,1.706,0.579,1.146,3.228
+python,glide_sync_ffi,True,100,10,1,22416,0.42,0.502,0.367,0.432,0.133,0.427,0.51,0.374,0.439,0.136,0.426,0.505,0.373,0.439,0.161
+python,redispy,True,100,100,1,5725,17.069,17.571,16.628,17.205,9.509,17.178,17.812,16.693,17.434,14.318,17.082,17.58,16.644,17.274,12.662
+python,glide_sync_ffi,True,100,100,1,20928,4.736,8.938,0.358,4.792,2.481,4.738,8.923,0.366,4.745,2.506,4.741,8.937,0.363,4.775,2.492
diff --git a/benchmarks/python/profile.svg b/benchmarks/python/profile.svg
new file mode 100644
index 0000000000..ae917687ee
--- /dev/null
+++ b/benchmarks/python/profile.svg
@@ -0,0 +1,491 @@
+
\ No newline at end of file
diff --git a/benchmarks/python/python_benchmark.py b/benchmarks/python/python_benchmark.py
index 580ac8a41d..05b157575c 100644
--- a/benchmarks/python/python_benchmark.py
+++ b/benchmarks/python/python_benchmark.py
@@ -18,6 +18,7 @@
from glide import (
GlideClientConfiguration,
GlideClusterClientConfiguration,
+ GlideAsync,
GlideClient,
GlideClusterClient,
Logger,
@@ -197,7 +198,12 @@ def latency_results(prefix, latencies):
async def create_clients(client_count, action):
-    return [await action() for _ in range(client_count)]
+    try:
+        return [await action() for _ in range(client_count)]  # normal path: factory returns an awaitable client
+    except Exception as e:
+        print(e)  # NOTE(review): error is swallowed and only printed — confirm this is meant solely for non-awaitable factories
+        return [action() for _ in range(client_count)]  # fallback: factory results returned WITHOUT awaiting (sync constructors)
+
async def run_clients(
@@ -265,12 +271,12 @@ async def main(
use_tls,
is_cluster,
):
- if clients_to_run == "all":
+ if clients_to_run == "all" or clients_to_run == "redispy":
client_class = redispy.RedisCluster if is_cluster else redispy.Redis
clients = await create_clients(
client_count,
lambda: client_class(
- host=host, port=port, decode_responses=True, ssl=use_tls
+ host=host, port=port, decode_responses=False, ssl=use_tls
),
)
@@ -285,7 +291,10 @@ async def main(
)
for client in clients:
- await client.aclose()
+ try:
+ await client.aclose()
+ except Exception:
+ await client.close()
if clients_to_run == "all" or clients_to_run == "glide":
# Glide Socket
@@ -301,7 +310,28 @@ async def main(
)
await run_clients(
clients,
- "glide",
+ "glide_socket",
+ event_loop_name,
+ total_commands,
+ num_of_concurrent_tasks,
+ data_size,
+ is_cluster,
+ )
+ if clients_to_run == "all" or clients_to_run == "glide_ffi":
+ # Glide Socket
+ client_class = GlideAsync
+ config = GlideClusterClientConfiguration(
+ [NodeAddress(host=host, port=port)], use_tls=use_tls
+ ) if is_cluster else GlideClientConfiguration(
+ [NodeAddress(host=host, port=port)], use_tls=use_tls
+ )
+ clients = await create_clients(
+ client_count,
+ lambda: client_class(config),
+ )
+ await run_clients(
+ clients,
+ "glide_ffi",
event_loop_name,
total_commands,
num_of_concurrent_tasks,
@@ -311,6 +341,7 @@ async def main(
def number_of_iterations(num_of_concurrent_tasks):
+ return 100000000
return min(max(100000, num_of_concurrent_tasks * 10000), 5000000)
diff --git a/benchmarks/python/python_benchmark_sync.py b/benchmarks/python/python_benchmark_sync.py
new file mode 100644
index 0000000000..a2b6fc6df6
--- /dev/null
+++ b/benchmarks/python/python_benchmark_sync.py
@@ -0,0 +1,397 @@
+# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+
+import argparse
+import asyncio
+import functools
+import json
+import math
+import random
+from threading import Thread, Lock
+import threading
+import time
+from datetime import datetime, timezone
+from enum import Enum
+from pathlib import Path
+from statistics import mean
+from typing import List
+
+import numpy as np
+import redis as redispy # type: ignore
+from glide import (
+ UDSGlideClusterClientSync,
+ UDSGlideClientSync,
+ GlideClusterClientConfiguration,
+ GlideClientConfiguration,
+ GlideSync,
+ Logger,
+ LogLevel,
+ NodeAddress,
+)
+
+
+class ChosenAction(Enum):  # per-command operation, drawn by choose_action()
+    GET_NON_EXISTING = 1  # GET on a key drawn from outside the SET keyspace
+    GET_EXISTING = 2  # GET on a key drawn from the SET keyspace (probably written earlier)
+    SET = 3  # SET a key from the SET keyspace to a fixed-size payload
+
+
+PORT = 6379  # default server port; overridable via --port
+
+arguments_parser = argparse.ArgumentParser()
+arguments_parser.add_argument(
+    "--resultsFile",
+    help="Where to write the results file",
+    required=False,
+    default="../results/python-results.json",
+)
+arguments_parser.add_argument(
+    "--dataSize", help="Size of data to set", required=False, default="100"
+)
+arguments_parser.add_argument(
+    "--concurrentTasks",
+    help="List of number of concurrent tasks to run",
+    nargs="+",
+    required=False,
+    default=("1", "10", "100"),
+)
+arguments_parser.add_argument(
+    "--clients", help="Which clients should run", required=False, default="all"
+)
+arguments_parser.add_argument(
+    "--host", help="What host to target", required=False, default="localhost"
+)
+arguments_parser.add_argument(
+    "--clientCount",
+    help="Number of clients to run concurrently",
+    nargs="+",
+    required=False,
+    default=("1"),  # NOTE(review): ("1") is the string "1", not a tuple — iteration works only because it is single-char
+)
+arguments_parser.add_argument(
+    "--tls",
+    help="Should benchmark a TLS server",
+    action="store_true",
+    required=False,
+    default=False,
+)
+arguments_parser.add_argument(
+    "--clusterModeEnabled",
+    help="Should benchmark a cluster mode enabled cluster",
+    action="store_true",
+    required=False,
+    default=False,
+)
+arguments_parser.add_argument(
+    "--port",
+    default=PORT,
+    type=int,
+    required=False,
+    help="Which port to connect to, defaults to `%(default)s`",
+)
+arguments_parser.add_argument(
+    "--minimal", help="Should run a minimal benchmark", action="store_true"
+)
+args = arguments_parser.parse_args()
+
+PROB_GET = 0.8  # 80% of commands are GETs
+PROB_GET_EXISTING_KEY = 0.8  # of those GETs, 80% target the SET keyspace
+SIZE_GET_KEYSPACE = 3750000 # 3.75 million
+SIZE_SET_KEYSPACE = 3000000 # 3 million
+started_tasks_counter = 0  # total commands claimed by worker threads (guarded by counter_lock)
+running_tasks = set()  # NOTE(review): never used in this module
+bench_json_results: List[dict] = []  # one result dict per (client, data size, thread count) run
+# Define locks for thread-safe access
+counter_lock = Lock()
+latencies_lock = Lock()
+
+
+def truncate_decimal(number: float, digits: int = 3) -> float:  # floor-truncate (not round) to `digits` decimal places
+    stepper = 10**digits
+    return math.floor(number * stepper) / stepper
+
+
+def generate_value(size):  # payload: a string of `size` zero characters
+    return str("0" * size)
+
+
+def generate_key_set():  # key for SET / GET_EXISTING, uniform over the SET keyspace
+    return str(random.randint(1, SIZE_SET_KEYSPACE + 1))
+
+
+def generate_key_get():  # key for GET_NON_EXISTING; NOTE(review): lower bound overlaps the SET keyspace — confirm intended
+    return str(random.randint(SIZE_SET_KEYSPACE, SIZE_GET_KEYSPACE + 1))
+
+
+def choose_action():  # sample action: PROB_GET GETs overall; of those, PROB_GET_EXISTING_KEY hit existing keys
+    if random.random() > PROB_GET:
+        return ChosenAction.SET
+    if random.random() > PROB_GET_EXISTING_KEY:
+        return ChosenAction.GET_NON_EXISTING
+    return ChosenAction.GET_EXISTING
+
+
+def calculate_latency(latency_list, percentile):  # given percentile of the latency samples, rounded to 4 decimals
+    return round(np.percentile(np.array(latency_list), percentile), 4)
+
+
+def process_results():  # dump all accumulated result rows to args.resultsFile as JSON
+    global bench_json_results
+    global args
+
+    # write json results to a file
+    res_file_path = args.resultsFile
+    with open(res_file_path, "w+") as f:
+        json.dump(bench_json_results, f)
+
+
+def timer(func):  # decorator: run func, DISCARD its return value, return elapsed seconds instead
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        tic = time.perf_counter()
+        func(*args, **kwargs)
+        toc = time.perf_counter()
+        return toc - tic
+
+    return wrapper
+
+# Global variables
+started_tasks_counter = 0  # NOTE(review): re-declares the counter already defined earlier in this module
+
+@timer
+def create_and_run_concurrent_tasks(
+    clients, total_commands, num_of_concurrent_threads, data_size, action_latencies
+):
+    """
+    Create and run concurrent tasks using threads.
+    """
+    threads = []  # one worker thread per requested concurrency level
+    global started_tasks_counter
+    started_tasks_counter = 0  # workers claim command indices from this (under counter_lock) until total_commands
+    for i in range(num_of_concurrent_threads):
+        thread = Thread(
+            target=execute_commands,
+            args=(clients, total_commands, data_size, action_latencies),
+            name=f"Worker-{i}"
+        )
+        threads.append(thread)
+        thread.start()
+
+    for thread in threads:
+        thread.join()  # @timer wraps this function, so callers receive elapsed seconds, not None
+
+def execute_commands(clients, total_commands, data_size, action_latencies):
+    """
+    Execute commands in a thread-safe manner.
+    """
+    global started_tasks_counter
+    while True:
+        # Safely increment the counter
+        with counter_lock:
+            if started_tasks_counter >= total_commands:
+                break
+            task_index = started_tasks_counter  # claim a unique command index before releasing the lock
+            started_tasks_counter += 1
+
+        chosen_action = choose_action()
+        client = clients[task_index % len(clients)]  # round-robin clients across claimed indices
+        tic = time.perf_counter()
+
+        if chosen_action == ChosenAction.GET_EXISTING:
+            res = client.get(generate_key_set())
+        elif chosen_action == ChosenAction.GET_NON_EXISTING:
+            res = client.get(generate_key_get())
+        elif chosen_action == ChosenAction.SET:
+            res = client.set(generate_key_set(), generate_value(data_size))
+
+        toc = time.perf_counter()
+        execution_time_milli = (toc - tic) * 1000  # per-command latency in milliseconds
+
+        # Safely append latency data
+        with latencies_lock:
+            action_latencies[chosen_action].append(truncate_decimal(execution_time_milli))
+    print(f"thread {threading.current_thread().name} finished")
+    return True
+
+def latency_results(prefix, latencies):
+ result = {}
+ result[prefix + "_p50_latency"] = calculate_latency(latencies, 50)
+ result[prefix + "_p90_latency"] = calculate_latency(latencies, 90)
+ result[prefix + "_p99_latency"] = calculate_latency(latencies, 9)
+ result[prefix + "_average_latency"] = truncate_decimal(mean(latencies))
+ result[prefix + "_std_dev"] = truncate_decimal(np.std(latencies))
+
+ return result
+
+
+def create_clients(client_count, action):  # build `client_count` clients by invoking the factory callable
+    return [action() for _ in range(client_count)]
+
+
+def run_clients(
+ clients,
+ client_name,
+ total_commands,
+ data_size,
+ is_cluster,
+ num_of_concurrent_threads
+):
+ now = datetime.now(timezone.utc).strftime("%H:%M:%S")
+ print(
+ f"Starting {client_name} data size: {data_size} number of threads: {num_of_concurrent_threads}"
+ f"client count: {len(clients)} {now}"
+ )
+ action_latencies = {
+ ChosenAction.GET_NON_EXISTING: list(),
+ ChosenAction.GET_EXISTING: list(),
+ ChosenAction.SET: list(),
+ }
+ time = create_and_run_concurrent_tasks(clients, total_commands, num_of_concurrent_threads, data_size, action_latencies)
+
+ tps = int(started_tasks_counter / time)
+ get_non_existing_latencies = action_latencies[ChosenAction.GET_NON_EXISTING]
+ get_non_existing_latency_results = latency_results(
+ "get_non_existing", get_non_existing_latencies
+ )
+
+ get_existing_latencies = action_latencies[ChosenAction.GET_EXISTING]
+ get_existing_latency_results = latency_results(
+ "get_existing", get_existing_latencies
+ )
+
+ set_latencies = action_latencies[ChosenAction.SET]
+ set_results = latency_results("set", set_latencies)
+
+ json_res = {
+ **{
+ "client": client_name,
+ "data_size": data_size,
+ "num_of_threads": num_of_concurrent_threads,
+ "tps": tps,
+ "client_count": len(clients),
+ "is_cluster": is_cluster,
+ },
+ **get_existing_latency_results,
+ **get_non_existing_latency_results,
+ **set_results,
+ }
+
+ bench_json_results.append(json_res)
+
+
+def main(
+    total_commands,
+    data_size,
+    clients_to_run,
+    host,
+    client_count,
+    use_tls,
+    is_cluster,
+):  # NOTE(review): also reads module globals `port` and `num_of_concurrent_threads` set in __main__ — confirm intended
+    if clients_to_run == "all" or clients_to_run == "redispy":
+        client_class = redispy.RedisCluster if is_cluster else redispy.Redis
+        clients = create_clients(
+            client_count,
+            lambda: client_class(
+                host=host, port=port, decode_responses=False, ssl=use_tls, max_connections=100
+            ),
+        )
+
+        run_clients(
+            clients,
+            "redispy",
+            total_commands,
+            data_size,
+            is_cluster,
+            num_of_concurrent_threads
+        )
+
+        for client in clients:
+            client.close()
+
+    if clients_to_run == "all" or clients_to_run == "glide_ffi":
+        # Glide Socket
+        # client_class = GlideClusterClient if is_cluster else GlideClient
+        # config = GlideClusterClientConfiguration(
+        #     [NodeAddress(host=host, port=port)], use_tls=use_tls
+        # ) if is_cluster else GlideClientConfiguration(
+        #     [NodeAddress(host=host, port=port)], use_tls=use_tls
+        # )
+        clients = create_clients(
+            client_count,
+            # lambda: client_class.create(config),
+            lambda: GlideSync(),  # NOTE(review): no host/port/tls passed — presumably connects to a default; verify
+        )
+        run_clients(
+            clients,
+            "glide_sync_ffi",
+            total_commands,
+            data_size,
+            is_cluster,
+            num_of_concurrent_threads
+        )
+
+
+    if clients_to_run == "all" or clients_to_run == "glide_uds":
+        # Glide Socket
+        client_class = UDSGlideClusterClientSync if is_cluster else UDSGlideClientSync
+        config = GlideClusterClientConfiguration(
+            [NodeAddress(host=host, port=port)], use_tls=use_tls
+        ) if is_cluster else GlideClientConfiguration(
+            [NodeAddress(host=host, port=port)], use_tls=use_tls
+        )
+        clients = create_clients(
+            client_count,
+            # lambda: client_class.create(config),
+            lambda: client_class.create(config),
+        )
+        run_clients(
+            clients,
+            "glide_sync_uds",
+            total_commands,
+            data_size,
+            is_cluster,
+            num_of_concurrent_threads
+        )
+
+
+def number_of_iterations(num_of_concurrent_threads):  # scale total commands with concurrency: 10k per thread, clamped to [100k, 1M]
+    return min(max(100000, num_of_concurrent_threads * 10000), 1000000)
+
+
+if __name__ == "__main__":
+    concurrent_tasks = args.concurrentTasks
+    data_size = int(args.dataSize)
+    clients_to_run = args.clients
+    client_count = args.clientCount
+    host = args.host
+    use_tls = args.tls
+    port = args.port  # read as a module global inside main()
+    is_cluster = args.clusterModeEnabled
+
+    # Setting the internal logger to log every log that has a level of info and above,
+    # and save the logs to a file with the name of the results file.
+    Logger.set_logger_config(LogLevel.INFO, Path(args.resultsFile).stem)
+
+    product_of_arguments = [
+        (data_size, int(num_of_concurrent_threads), int(number_of_clients))
+        for num_of_concurrent_threads in concurrent_tasks
+        for number_of_clients in client_count
+        if int(number_of_clients) <= int(num_of_concurrent_threads)
+    ]
+
+    for data_size, num_of_concurrent_threads, number_of_clients in product_of_arguments:  # loop var num_of_concurrent_threads is also read as a global by main()
+        iterations = (
+            1000 if args.minimal else number_of_iterations(num_of_concurrent_threads)
+        )
+        main(
+            iterations,
+            data_size,
+            clients_to_run,
+            host,
+            number_of_clients,
+            use_tls,
+            is_cluster,
+        )
+
+
+    process_results()
diff --git a/benchmarks/python/python_sync_bench_results.csv b/benchmarks/python/python_sync_bench_results.csv
new file mode 100644
index 0000000000..5fe8070133
--- /dev/null
+++ b/benchmarks/python/python_sync_bench_results.csv
@@ -0,0 +1,10 @@
+language,client,is_cluster,data_size,client_count,tps,get_non_existing_p50_latency,get_non_existing_p90_latency,get_non_existing_p99_latency,get_non_existing_average_latency,get_non_existing_std_dev,get_existing_p50_latency,get_existing_p90_latency,get_existing_p99_latency,get_existing_average_latency,get_existing_std_dev,set_p50_latency,set_p90_latency,set_p99_latency,set_average_latency,set_std_dev
+python,glide_sync_ffi,True,100,1,4575,0.183,0.278,0.175,0.203,0.071,0.204,0.3,0.178,0.218,0.068,0.194,0.302,0.186,0.214,0.061
+python,glide_sync_uds,True,100,1,4126,0.216,0.251,0.207,0.227,0.06,0.234,0.281,0.211,0.242,0.066,0.226,0.26,0.217,0.236,0.036
+python,redispy,True,100,1,4129,0.213,0.258,0.207,0.227,0.065,0.229,0.29,0.209,0.24,0.054,0.226,0.271,0.219,0.24,0.087
+python,glide_sync_ffi,True,4000,1,4311,0.203,0.237,0.196,0.211,0.027,0.222,0.261,0.199,0.226,0.032,0.242,0.277,0.232,0.25,0.028
+python,glide_sync_uds,True,4000,1,3921,0.218,0.262,0.207,0.233,0.044,0.237,0.291,0.211,0.248,0.049,0.258,0.311,0.245,0.275,0.052
+python,redispy,True,4000,1,4410,0.195,0.239,0.189,0.205,0.032,0.217,0.261,0.191,0.22,0.036,0.231,0.272,0.224,0.242,0.033
+python,glide_sync_ffi,True,16000,1,4059,0.187,0.323,0.175,0.212,0.067,0.206,0.332,0.179,0.228,0.074,0.292,0.393,0.272,0.315,0.086
+python,glide_sync_uds,True,16000,1,3616,0.217,0.33,0.207,0.241,0.07,0.237,0.3412,0.211,0.257,0.071,0.319,0.478,0.305,0.348,0.104
+python,redispy,True,16000,1,3762,0.209,0.319,0.201,0.232,0.072,0.231,0.328,0.203,0.247,0.07,0.305,0.467,0.293,0.331,0.073
diff --git a/benchmarks/python/redispy_async_100_10 b/benchmarks/python/redispy_async_100_10
new file mode 100644
index 0000000000..0637a088a0
--- /dev/null
+++ b/benchmarks/python/redispy_async_100_10
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/benchmarks/python/redispy_async_100_1000 b/benchmarks/python/redispy_async_100_1000
new file mode 100644
index 0000000000..0637a088a0
--- /dev/null
+++ b/benchmarks/python/redispy_async_100_1000
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/benchmarks/utilities/csv_exporter.py b/benchmarks/utilities/csv_exporter.py
index 14004d0532..d533f10f45 100755
--- a/benchmarks/utilities/csv_exporter.py
+++ b/benchmarks/utilities/csv_exporter.py
@@ -14,8 +14,8 @@
"language",
"client",
"is_cluster",
- "num_of_tasks",
"data_size",
+ "num_of_threads",
"client_count",
"tps",
"get_non_existing_p50_latency",
@@ -43,10 +43,11 @@
json_file_name = os.path.basename(json_file_full_path)
- languages = ["csharp", "node", "python", "rust", "java", "go"]
- language = next(
- (language for language in languages if language in json_file_name), None
- )
+ # languages = ["csharp", "node", "python", "rust", "java", "go"]
+ # language = next(
+ # (language for language in languages if language in json_file_name), None
+ # )
+ language = "python"
if not language:
raise Exception(f"Unknown language for {json_file_name}")
diff --git a/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs b/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
index 955d24d9e9..6cfe9f4c0d 100644
--- a/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
+++ b/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
@@ -427,6 +427,10 @@ where
}
}
+ // pub(crate) fn get_node(&self, address: &String) -> Option<&ClusterNode> {
+ // self.connection_map.get(address)
+ // }
+
pub(crate) fn len(&self) -> usize {
self.connection_map.len()
}
diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs
index 8164d09413..6a4cbf8fe7 100644
--- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs
+++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs
@@ -1386,7 +1386,10 @@ where
tasks.push(async move {
let node_option = if check_existing_conn {
let connections_container = inner.conn_lock.read().expect(MUTEX_READ_ERR);
- connections_container.remove_node(&address)
+ connections_container
+ .connection_map()
+ .get(&address)
+ .map(|node| node.value().clone())
} else {
None
};
diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs
index 4896f83565..192c2f5173 100644
--- a/glide-core/src/socket_listener.rs
+++ b/glide-core/src/socket_listener.rs
@@ -808,7 +808,8 @@ pub fn get_socket_path() -> String {
pub fn start_socket_listener_internal<InitCallback>(
init_callback: InitCallback,
socket_path: Option<String>,
-) where
+) -> String
+where
InitCallback: FnOnce(Result<String, String>) + Send + Clone + 'static,
{
static INITIALIZED_SOCKETS: Lazy<RwLock<HashSet<String>>> =
@@ -823,7 +824,7 @@ pub fn start_socket_listener_internal(
.expect("Failed to acquire sockets db read guard");
if initialized_sockets.contains(&socket_path) {
init_callback(Ok(socket_path.clone()));
- return;
+ return socket_path;
}
}
@@ -833,7 +834,7 @@ pub fn start_socket_listener_internal(
.expect("Failed to acquire sockets db write guard");
if sockets_write_guard.contains(&socket_path) {
init_callback(Ok(socket_path.clone()));
- return;
+ return socket_path;
}
let (tx, rx) = std::sync::mpsc::channel();
@@ -914,9 +915,10 @@ pub fn start_socket_listener_internal(
// wait for thread initialization signaling, callback invocation is done in the thread
let _ = rx.recv().map(|res| {
if res {
- sockets_write_guard.insert(socket_path);
+ sockets_write_guard.insert(socket_path.clone());
}
});
+ socket_path
}
/// Creates a new thread with a main loop task listening on the socket for new connections.
@@ -924,9 +926,9 @@ pub fn start_socket_listener_internal(
///
/// # Arguments
/// * `init_callback` - called when the socket listener fails to initialize, with the reason for the failure.
-pub fn start_socket_listener<InitCallback>(init_callback: InitCallback)
+pub fn start_socket_listener<InitCallback>(init_callback: InitCallback) -> String
where
InitCallback: FnOnce(Result<String, String>) + Send + Clone + 'static,
{
- start_socket_listener_internal(init_callback, None);
+ start_socket_listener_internal(init_callback, None)
}
diff --git a/go/DEVELOPER.md b/go/DEVELOPER.md
index 5619b7f7b2..12562c9e0f 100644
--- a/go/DEVELOPER.md
+++ b/go/DEVELOPER.md
@@ -105,32 +105,28 @@ Before starting this step, make sure you've installed all software requirements.
git clone --branch ${VERSION} https://github.com/valkey-io/valkey-glide.git
cd valkey-glide
```
-2. Initialize git submodules:
- ```bash
- git submodule update --init --recursive
- ```
-3. Install build dependencies:
+2. Install build dependencies:
```bash
cd go
make install-build-tools
```
-4. If on CentOS or Ubuntu, add the glide-rs library to LD_LIBRARY_PATH:
+3. If on CentOS or Ubuntu, add the glide-rs library to LD_LIBRARY_PATH:
```bash
# Replace "" with the path to the valkey-glide root, eg "$HOME/Projects/valkey-glide"
GLIDE_ROOT_FOLDER_PATH=
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GLIDE_ROOT_FOLDER_PATH/go/target/release/deps/
```
-5. Build the Go wrapper:
+4. Build the Go wrapper:
```bash
make build
```
-6. Run tests:
+5. Run tests:
1. Ensure that you have installed valkey-server and valkey-cli on your host. You can find the Valkey installation guide at the following link: [Valkey Installation Guide](https://github.com/valkey-io/valkey).
2. Execute the following command from the go folder:
```bash
go test -race ./...
```
-7. Install Go development tools with:
+6. Install Go development tools with:
```bash
# For go1.22:
make install-dev-tools
diff --git a/go/src/lib.rs b/go/src/lib.rs
index 361bf320f6..e6d8ba985d 100644
--- a/go/src/lib.rs
+++ b/go/src/lib.rs
@@ -7,6 +7,7 @@ use glide_core::errors;
use glide_core::errors::RequestErrorType;
use glide_core::request_type::RequestType;
use glide_core::ConnectionRequest;
+use glide_core::client::{NodeAddress, TlsMode};
use protobuf::Message;
use redis::{RedisResult, Value};
use std::slice::from_raw_parts;
@@ -112,35 +113,30 @@ pub type FailureCallback = unsafe extern "C" fn(
error_type: RequestErrorType,
) -> ();
-/// The connection response.
-///
-/// It contains either a connection or an error. It is represented as a struct instead of a union for ease of use in the wrapper language.
-///
-/// The struct is freed by the external caller by using `free_connection_response` to avoid memory leaks.
-#[repr(C)]
-pub struct ConnectionResponse {
- conn_ptr: *const c_void,
- connection_error_message: *const c_char,
-}
+ /// The connection response.
+ ///
+ /// It contains either a connection or an error. It is represented as a struct instead of a union for ease of use in the wrapper language.
+ ///
+ /// The struct is freed by the external caller by using `free_connection_response` to avoid memory leaks.
+ #[repr(C)]
+ pub struct ConnectionResponse {
+ conn_ptr: *const c_void,
+ connection_error_message: *const c_char,
+ }
/// A `GlideClient` adapter.
// TODO: Remove allow(dead_code) once connection logic is implemented
#[allow(dead_code)]
pub struct ClientAdapter {
client: GlideClient,
- success_callback: SuccessCallback,
- failure_callback: FailureCallback,
runtime: Runtime,
}
fn create_client_internal(
connection_request_bytes: &[u8],
- success_callback: SuccessCallback,
- failure_callback: FailureCallback,
-) -> Result<ClientAdapter, String> {
let request = connection_request::ConnectionRequest::parse_from_bytes(connection_request_bytes)
.map_err(|err| err.to_string())?;
- // TODO: optimize this using multiple threads instead of a single worker thread (e.g. by pinning each go thread to a rust thread)
let runtime = Builder::new_multi_thread()
.enable_all()
.worker_threads(1)
@@ -155,8 +151,6 @@ fn create_client_internal(
.map_err(|err| err.to_string())?;
Ok(ClientAdapter {
client,
- success_callback,
- failure_callback,
runtime,
})
}
@@ -179,15 +173,10 @@ fn create_client_internal(
/// * Both the `success_callback` and `failure_callback` function pointers need to live while the client is open/active. The caller is responsible for freeing both callbacks.
// TODO: Consider making this async
#[no_mangle]
-pub unsafe extern "C" fn create_client(
- connection_request_bytes: *const u8,
- connection_request_len: usize,
- success_callback: SuccessCallback,
- failure_callback: FailureCallback,
-) -> *const ConnectionResponse {
+pub unsafe extern "C" fn create_client(connection_request_bytes: *const u8, connection_request_len: usize) -> *const ConnectionResponse {
let request_bytes =
unsafe { std::slice::from_raw_parts(connection_request_bytes, connection_request_len) };
- let response = match create_client_internal(request_bytes, success_callback, failure_callback) {
+ let response = match create_client_internal(request_bytes) {
Err(err) => ConnectionResponse {
conn_ptr: std::ptr::null(),
connection_error_message: CString::into_raw(
@@ -515,61 +504,46 @@ pub unsafe extern "C" fn command(
arg_count: c_ulong,
args: *const usize,
args_len: *const c_ulong,
-) {
+) -> *mut CommandResponse {
let client_adapter =
unsafe { Box::leak(Box::from_raw(client_adapter_ptr as *mut ClientAdapter)) };
- // The safety of this needs to be ensured by the calling code. Cannot dispose of the pointer before
- // all operations have completed.
- let ptr_address = client_adapter_ptr as usize;
+ // Ensure the arguments are converted properly
let arg_vec =
unsafe { convert_double_pointer_to_vec(args as *const *const c_void, arg_count, args_len) };
let mut client_clone = client_adapter.client.clone();
// Create the command outside of the task to ensure that the command arguments passed
- // from "go" are still valid
+ // from the caller are still valid
let mut cmd = command_type
.get_command()
.expect("Couldn't fetch command type");
+
for command_arg in arg_vec {
cmd.arg(command_arg);
}
- client_adapter.runtime.spawn(async move {
- let result = client_clone.send_command(&cmd, None).await;
- let client_adapter = unsafe { Box::leak(Box::from_raw(ptr_address as *mut ClientAdapter)) };
- let value = match result {
- Ok(value) => value,
- Err(err) => {
- let message = errors::error_message(&err);
- let error_type = errors::error_type(&err);
-
- let c_err_str = CString::into_raw(
- CString::new(message).expect("Couldn't convert error message to CString"),
- );
- unsafe { (client_adapter.failure_callback)(channel, c_err_str, error_type) };
- return;
- }
- };
-
- let result: RedisResult<CommandResponse> = valkey_value_to_command_response(value);
+ // Block on the async task to execute the command
+ let result = client_adapter.runtime.block_on(async move {
+ client_clone.send_command(&cmd, None).await
+ });
- unsafe {
- match result {
- Ok(message) => {
- (client_adapter.success_callback)(channel, Box::into_raw(Box::new(message)))
- }
+ match result {
+ Ok(value) => {
+ // Convert the value to a CommandResponse
+ match valkey_value_to_command_response(value) {
+ Ok(command_response) => Box::into_raw(Box::new(command_response)), // Return a pointer to the CommandResponse
Err(err) => {
- let message = errors::error_message(&err);
- let error_type = errors::error_type(&err);
-
- let c_err_str = CString::into_raw(
- CString::new(message).expect("Couldn't convert error message to CString"),
- );
- (client_adapter.failure_callback)(channel, c_err_str, error_type);
+ eprintln!("Error converting value to CommandResponse: {:?}", err);
+ std::ptr::null_mut()
}
- };
+ }
}
- });
+ Err(err) => {
+ // Handle the error case
+ eprintln!("Error executing command: {:?}", err);
+ std::ptr::null_mut()
+ }
+ }
}
diff --git a/go/src/lib_async.rs b/go/src/lib_async.rs
new file mode 100644
index 0000000000..376da58dfa
--- /dev/null
+++ b/go/src/lib_async.rs
@@ -0,0 +1,651 @@
+// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+
+#![deny(unsafe_op_in_unsafe_fn)]
+use glide_core::client::Client as GlideClient;
+use glide_core::command_request::SimpleRoutes;
+use glide_core::command_request::{Routes, SlotTypes};
+use glide_core::connection_request;
+use glide_core::errors;
+use glide_core::errors::RequestErrorType;
+use glide_core::request_type::RequestType;
+use glide_core::ConnectionRequest;
+use protobuf::Message;
+use redis::cluster_routing::{
+ MultipleNodeRoutingInfo, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr,
+};
+use redis::cluster_routing::{ResponsePolicy, Routable};
+use redis::{Cmd, RedisResult, Value};
+use std::slice::from_raw_parts;
+use std::{
+ ffi::{c_void, CString},
+ mem,
+ os::raw::{c_char, c_double, c_long, c_ulong},
+};
+use tokio::runtime::Builder;
+use tokio::runtime::Runtime;
+
+/// The struct represents the response of the command.
+///
+/// It will have one of the value populated depending on the return type of the command.
+///
+/// The struct is freed by the external caller by using `free_command_response` to avoid memory leaks.
+/// TODO: Add a type enum to validate what type of response is being sent in the CommandResponse.
+#[repr(C)]
+#[derive(Debug)]
+pub struct CommandResponse {
+ response_type: ResponseType,
+ int_value: c_long,
+ float_value: c_double,
+ bool_value: bool,
+
+ /// Below two values are related to each other.
+ /// `string_value` represents the string.
+ /// `string_value_len` represents the length of the string.
+ string_value: *mut c_char,
+ string_value_len: c_long,
+
+ /// Below two values are related to each other.
+ /// `array_value` represents the array of CommandResponse.
+ /// `array_value_len` represents the length of the array.
+ array_value: *mut CommandResponse,
+ array_value_len: c_long,
+
+ /// Below two values represent the Map structure inside CommandResponse.
+ /// The map is transformed into an array of (map_key: CommandResponse, map_value: CommandResponse) and passed to Go.
+ /// These are represented as pointers as the map can be null (optionally present).
+ map_key: *mut CommandResponse,
+ map_value: *mut CommandResponse,
+
+ /// Below two values are related to each other.
+ /// `sets_value` represents the set of CommandResponse.
+ /// `sets_value_len` represents the length of the set.
+ sets_value: *mut CommandResponse,
+ sets_value_len: c_long,
+}
+
+impl Default for CommandResponse {
+ fn default() -> Self {
+ CommandResponse {
+ response_type: ResponseType::default(),
+ int_value: 0,
+ float_value: 0.0,
+ bool_value: false,
+ string_value: std::ptr::null_mut(),
+ string_value_len: 0,
+ array_value: std::ptr::null_mut(),
+ array_value_len: 0,
+ map_key: std::ptr::null_mut(),
+ map_value: std::ptr::null_mut(),
+ sets_value: std::ptr::null_mut(),
+ sets_value_len: 0,
+ }
+ }
+}
+
+#[repr(C)]
+#[derive(Debug, Default)]
+pub enum ResponseType {
+ #[default]
+ Null = 0,
+ Int = 1,
+ Float = 2,
+ Bool = 3,
+ String = 4,
+ Array = 5,
+ Map = 6,
+ Sets = 7,
+}
+
+/// Success callback that is called when a command succeeds.
+///
+/// The success callback needs to copy the given string synchronously, since it will be dropped by Rust once the callback returns. The callback should be offloaded to a separate thread in order not to exhaust the client's thread pool.
+///
+/// `index_ptr` is a baton-pass back to the caller language to uniquely identify the promise.
+/// `message` is the value returned by the command. The 'message' is managed by Rust and is freed when the callback returns control back to the caller.
+pub type SuccessCallback =
+ unsafe extern "C" fn(index_ptr: usize, message: *const CommandResponse) -> ();
+
+/// Failure callback that is called when a command fails.
+///
+/// The failure callback needs to copy the given string synchronously, since it will be dropped by Rust once the callback returns. The callback should be offloaded to a separate thread in order not to exhaust the client's thread pool.
+///
+/// `index_ptr` is a baton-pass back to the caller language to uniquely identify the promise.
+/// `error_message` is the error message returned by server for the failed command. The 'error_message' is managed by Rust and is freed when the callback returns control back to the caller.
+/// `error_type` is the type of error returned by glide-core, depending on the `RedisError` returned.
+pub type FailureCallback = unsafe extern "C" fn(
+ index_ptr: usize,
+ error_message: *const c_char,
+ error_type: RequestErrorType,
+) -> ();
+
+/// The connection response.
+///
+/// It contains either a connection or an error. It is represented as a struct instead of a union for ease of use in the wrapper language.
+///
+/// The struct is freed by the external caller by using `free_connection_response` to avoid memory leaks.
+#[repr(C)]
+pub struct ConnectionResponse {
+ conn_ptr: *const c_void,
+ connection_error_message: *const c_char,
+}
+
+/// A `GlideClient` adapter.
+// TODO: Remove allow(dead_code) once connection logic is implemented
+#[allow(dead_code)]
+pub struct ClientAdapter {
+ client: GlideClient,
+ success_callback: SuccessCallback,
+ failure_callback: FailureCallback,
+ runtime: Runtime,
+}
+
+fn create_client_internal(
+ connection_request_bytes: &[u8],
+ success_callback: SuccessCallback,
+ failure_callback: FailureCallback,
+) -> Result<ClientAdapter, String> {
+ let request = connection_request::ConnectionRequest::parse_from_bytes(connection_request_bytes)
+ .map_err(|err| err.to_string())?;
+ // TODO: optimize this using multiple threads instead of a single worker thread (e.g. by pinning each go thread to a rust thread)
+ let runtime = Builder::new_multi_thread()
+ .enable_all()
+ .worker_threads(1)
+ .thread_name("Valkey-GLIDE Go thread")
+ .build()
+ .map_err(|err| {
+ let redis_error = err.into();
+ errors::error_message(&redis_error)
+ })?;
+ let client = runtime
+ .block_on(GlideClient::new(ConnectionRequest::from(request), None))
+ .map_err(|err| err.to_string())?;
+ Ok(ClientAdapter {
+ client,
+ success_callback,
+ failure_callback,
+ runtime,
+ })
+}
+
+/// Creates a new `ClientAdapter` with a new `GlideClient` configured using a Protobuf `ConnectionRequest`.
+///
+/// The returned `ConnectionResponse` will only be freed by calling [`free_connection_response`].
+///
+/// `connection_request_bytes` is an array of bytes that will be parsed into a Protobuf `ConnectionRequest` object.
+/// `connection_request_len` is the number of bytes in `connection_request_bytes`.
+/// `success_callback` is the callback that will be called when a command succeeds.
+/// `failure_callback` is the callback that will be called when a command fails.
+///
+/// # Safety
+///
+/// * `connection_request_bytes` must point to `connection_request_len` consecutive properly initialized bytes. It must be a well-formed Protobuf `ConnectionRequest` object. The array must be allocated by the caller and subsequently freed by the caller after this function returns.
+/// * `connection_request_len` must not be greater than the length of the connection request bytes array. It must also not be greater than the max value of a signed pointer-sized integer.
+/// * The `conn_ptr` pointer in the returned `ConnectionResponse` must live while the client is open/active and must be explicitly freed by calling [`close_client`].
+/// * The `connection_error_message` pointer in the returned `ConnectionResponse` must live until the returned `ConnectionResponse` pointer is passed to [`free_connection_response`].
+/// * Both the `success_callback` and `failure_callback` function pointers need to live while the client is open/active. The caller is responsible for freeing both callbacks.
+// TODO: Consider making this async
+#[no_mangle]
+pub unsafe extern "C" fn create_client(
+ connection_request_bytes: *const u8,
+ connection_request_len: usize,
+ success_callback: SuccessCallback,
+ failure_callback: FailureCallback,
+) -> *const ConnectionResponse {
+ let request_bytes =
+ unsafe { std::slice::from_raw_parts(connection_request_bytes, connection_request_len) };
+ let response = match create_client_internal(request_bytes, success_callback, failure_callback) {
+ Err(err) => ConnectionResponse {
+ conn_ptr: std::ptr::null(),
+ connection_error_message: CString::into_raw(
+ CString::new(err).expect("Couldn't convert error message to CString"),
+ ),
+ },
+ Ok(client) => ConnectionResponse {
+ conn_ptr: Box::into_raw(Box::new(client)) as *const c_void,
+ connection_error_message: std::ptr::null(),
+ },
+ };
+ Box::into_raw(Box::new(response))
+}
+
+/// Closes the given `GlideClient`, freeing it from the heap.
+///
+/// `client_adapter_ptr` is a pointer to a valid `GlideClient` returned in the `ConnectionResponse` from [`create_client`].
+///
+/// # Panics
+///
+/// This function panics when called with a null `client_adapter_ptr`.
+///
+/// # Safety
+///
+/// * `close_client` can only be called once per client. Calling it twice is undefined behavior, since the address will be freed twice.
+/// * `close_client` must be called after `free_connection_response` has been called to avoid creating a dangling pointer in the `ConnectionResponse`.
+/// * `client_adapter_ptr` must be obtained from the `ConnectionResponse` returned from [`create_client`].
+/// * `client_adapter_ptr` must be valid until `close_client` is called.
+// TODO: Ensure safety when command has not completed yet
+#[no_mangle]
+pub unsafe extern "C" fn close_client(client_adapter_ptr: *const c_void) {
+ assert!(!client_adapter_ptr.is_null());
+ drop(unsafe { Box::from_raw(client_adapter_ptr as *mut ClientAdapter) });
+}
+
+/// Deallocates a `ConnectionResponse`.
+///
+/// This function also frees the contained error. If the contained error is a null pointer, the function returns and only the `ConnectionResponse` is freed.
+///
+/// # Panics
+///
+/// This function panics when called with a null `ConnectionResponse` pointer.
+///
+/// # Safety
+///
+/// * `free_connection_response` can only be called once per `ConnectionResponse`. Calling it twice is undefined behavior, since the address will be freed twice.
+/// * `connection_response_ptr` must be obtained from the `ConnectionResponse` returned from [`create_client`].
+/// * `connection_response_ptr` must be valid until `free_connection_response` is called.
+/// * The contained `connection_error_message` must be obtained from the `ConnectionResponse` returned from [`create_client`].
+/// * The contained `connection_error_message` must be valid until `free_connection_response` is called and it must outlive the `ConnectionResponse` that contains it.
+#[no_mangle]
+pub unsafe extern "C" fn free_connection_response(
+ connection_response_ptr: *mut ConnectionResponse,
+) {
+ assert!(!connection_response_ptr.is_null());
+ let connection_response = unsafe { Box::from_raw(connection_response_ptr) };
+ let connection_error_message = connection_response.connection_error_message;
+ drop(connection_response);
+ if !connection_error_message.is_null() {
+ drop(unsafe { CString::from_raw(connection_error_message as *mut c_char) });
+ }
+}
+
+/// Provides the string mapping for the ResponseType enum.
+#[no_mangle]
+pub extern "C" fn get_response_type_string(response_type: ResponseType) -> *mut c_char {
+ let s = match response_type {
+ ResponseType::Null => "Null",
+ ResponseType::Int => "Int",
+ ResponseType::Float => "Float",
+ ResponseType::Bool => "Bool",
+ ResponseType::String => "String",
+ ResponseType::Array => "Array",
+ ResponseType::Map => "Map",
+ ResponseType::Sets => "Sets",
+ };
+ let c_str = CString::new(s).unwrap_or_default();
+ c_str.into_raw()
+}
+
+/// Deallocates a string generated via get_response_type_string.
+///
+/// # Safety
+/// free_response_type_string can be called only once per response_string.
+#[no_mangle]
+pub extern "C" fn free_response_type_string(response_string: *mut c_char) {
+ if !response_string.is_null() {
+ drop(unsafe { CString::from_raw(response_string as *mut c_char) });
+ }
+}
+
+/// Deallocates a `CommandResponse`.
+///
+/// This function also frees the contained string_value and array_value. If the string_value and array_value are null pointers, the function returns and only the `CommandResponse` is freed.
+///
+/// # Safety
+///
+/// * `free_command_response` can only be called once per `CommandResponse`. Calling it twice is undefined behavior, since the address will be freed twice.
+/// * `command_response_ptr` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
+/// * `command_response_ptr` must be valid until `free_command_response` is called.
+#[no_mangle]
+pub unsafe extern "C" fn free_command_response(command_response_ptr: *mut CommandResponse) {
+ if !command_response_ptr.is_null() {
+ let command_response = unsafe { Box::from_raw(command_response_ptr) };
+ free_command_response_elements(*command_response);
+ }
+}
+
+/// Frees the nested elements of `CommandResponse`.
+/// TODO: Add a test case to check for memory leak.
+///
+/// # Safety
+///
+/// * `free_command_response_elements` can only be called once per `CommandResponse`. Calling it twice is undefined behavior, since the address will be freed twice.
+/// * The contained `string_value` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
+/// * The contained `string_value` must be valid until `free_command_response` is called and it must outlive the `CommandResponse` that contains it.
+/// * The contained `array_value` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
+/// * The contained `array_value` must be valid until `free_command_response` is called and it must outlive the `CommandResponse` that contains it.
+/// * The contained `map_key` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
+/// * The contained `map_key` must be valid until `free_command_response` is called and it must outlive the `CommandResponse` that contains it.
+/// * The contained `map_value` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
+/// * The contained `map_value` must be valid until `free_command_response` is called and it must outlive the `CommandResponse` that contains it.
+fn free_command_response_elements(command_response: CommandResponse) {
+ let string_value = command_response.string_value;
+ let string_value_len = command_response.string_value_len;
+ let array_value = command_response.array_value;
+ let array_value_len = command_response.array_value_len;
+ let map_key = command_response.map_key;
+ let map_value = command_response.map_value;
+ let sets_value = command_response.sets_value;
+ let sets_value_len = command_response.sets_value_len;
+ if !string_value.is_null() {
+ let len = string_value_len as usize;
+ unsafe { Vec::from_raw_parts(string_value, len, len) };
+ }
+ if !array_value.is_null() {
+ let len = array_value_len as usize;
+ let vec = unsafe { Vec::from_raw_parts(array_value, len, len) };
+ for element in vec.into_iter() {
+ free_command_response_elements(element);
+ }
+ }
+ if !map_key.is_null() {
+ unsafe { free_command_response(map_key) };
+ }
+ if !map_value.is_null() {
+ unsafe { free_command_response(map_value) };
+ }
+ if !sets_value.is_null() {
+ let len = sets_value_len as usize;
+ let vec = unsafe { Vec::from_raw_parts(sets_value, len, len) };
+ for element in vec.into_iter() {
+ free_command_response_elements(element);
+ }
+ }
+}
+
+/// Frees the error_message received on a command failure.
+/// TODO: Add a test case to check for memory leak.
+///
+/// # Panics
+///
+/// This functions panics when called with a null `c_char` pointer.
+///
+/// # Safety
+///
+/// `free_error_message` can only be called once per `error_message`. Calling it twice is undefined
+/// behavior, since the address will be freed twice.
+#[no_mangle]
+pub unsafe extern "C" fn free_error_message(error_message: *mut c_char) {
+ assert!(!error_message.is_null());
+ drop(unsafe { CString::from_raw(error_message as *mut c_char) });
+}
+
+/// Converts a double pointer to a vec.
+///
+/// # Safety
+///
+/// `convert_double_pointer_to_vec` returns a `Vec` of u8 slice which holds pointers of `go`
+/// strings. The returned `Vec<&'a [u8]>` is meant to be copied into Rust code. Storing them
+/// for later use will cause the program to crash as the pointers will be freed by go's gc
+unsafe fn convert_double_pointer_to_vec<'a>(
+ data: *const *const c_void,
+ len: c_ulong,
+ data_len: *const c_ulong,
+) -> Vec<&'a [u8]> {
+ let string_ptrs = unsafe { from_raw_parts(data, len as usize) };
+ let string_lengths = unsafe { from_raw_parts(data_len, len as usize) };
+ let mut result = Vec::<&[u8]>::with_capacity(string_ptrs.len());
+ for (i, &str_ptr) in string_ptrs.iter().enumerate() {
+ let slice = unsafe { from_raw_parts(str_ptr as *const u8, string_lengths[i] as usize) };
+ result.push(slice);
+ }
+ result
+}
+
+fn convert_vec_to_pointer<T>(mut vec: Vec<T>) -> (*mut T, c_long) {
+ vec.shrink_to_fit();
+ let vec_ptr = vec.as_mut_ptr();
+ let len = vec.len() as c_long;
+ mem::forget(vec);
+ (vec_ptr, len)
+}
+
+/// TODO: Avoid the use of expect and unwrap in the code and add a common error handling mechanism.
+fn valkey_value_to_command_response(value: Value) -> RedisResult<CommandResponse> {
+ let mut command_response = CommandResponse::default();
+ let result: RedisResult<CommandResponse> = match value {
+ Value::Nil => Ok(command_response),
+ Value::SimpleString(text) => {
+ let vec: Vec<u8> = text.into_bytes();
+ let (vec_ptr, len) = convert_vec_to_pointer(vec);
+ command_response.string_value = vec_ptr as *mut c_char;
+ command_response.string_value_len = len;
+ command_response.response_type = ResponseType::String;
+ Ok(command_response)
+ }
+ Value::BulkString(text) => {
+ let (vec_ptr, len) = convert_vec_to_pointer(text);
+ command_response.string_value = vec_ptr as *mut c_char;
+ command_response.string_value_len = len;
+ command_response.response_type = ResponseType::String;
+ Ok(command_response)
+ }
+ Value::VerbatimString { format: _, text } => {
+ let vec: Vec<u8> = text.into_bytes();
+ let (vec_ptr, len) = convert_vec_to_pointer(vec);
+ command_response.string_value = vec_ptr as *mut c_char;
+ command_response.string_value_len = len;
+ command_response.response_type = ResponseType::String;
+ Ok(command_response)
+ }
+ Value::Okay => {
+ let vec: Vec<u8> = String::from("OK").into_bytes();
+ let (vec_ptr, len) = convert_vec_to_pointer(vec);
+ command_response.string_value = vec_ptr as *mut c_char;
+ command_response.string_value_len = len;
+ command_response.response_type = ResponseType::String;
+ Ok(command_response)
+ }
+ Value::Int(num) => {
+ command_response.int_value = num;
+ command_response.response_type = ResponseType::Int;
+ Ok(command_response)
+ }
+ Value::Double(num) => {
+ command_response.float_value = num;
+ command_response.response_type = ResponseType::Float;
+ Ok(command_response)
+ }
+ Value::Boolean(boolean) => {
+ command_response.bool_value = boolean;
+ command_response.response_type = ResponseType::Bool;
+ Ok(command_response)
+ }
+ Value::Array(array) => {
+ let vec: Vec = array
+ .into_iter()
+ .map(|v| {
+ valkey_value_to_command_response(v)
+ .expect("Value couldn't be converted to CommandResponse")
+ })
+ .collect();
+ let (vec_ptr, len) = convert_vec_to_pointer(vec);
+ command_response.array_value = vec_ptr;
+ command_response.array_value_len = len;
+ command_response.response_type = ResponseType::Array;
+ Ok(command_response)
+ }
+ Value::Map(map) => {
+ let result: Vec = map
+ .into_iter()
+ .map(|(key, val)| {
+ let mut map_response = CommandResponse::default();
+
+ let map_key = valkey_value_to_command_response(key)
+ .expect("Value couldn't be converted to CommandResponse");
+ map_response.map_key = Box::into_raw(Box::new(map_key));
+
+ let map_val = valkey_value_to_command_response(val)
+ .expect("Value couldn't be converted to CommandResponse");
+ map_response.map_value = Box::into_raw(Box::new(map_val));
+
+ map_response
+ })
+ .collect::>();
+
+ let (vec_ptr, len) = convert_vec_to_pointer(result);
+ command_response.array_value = vec_ptr;
+ command_response.array_value_len = len;
+ command_response.response_type = ResponseType::Map;
+ Ok(command_response)
+ }
+ Value::Set(array) => {
+ let vec: Vec = array
+ .into_iter()
+ .map(|v| {
+ valkey_value_to_command_response(v)
+ .expect("Value couldn't be converted to CommandResponse")
+ })
+ .collect();
+ let (vec_ptr, len) = convert_vec_to_pointer(vec);
+ command_response.sets_value = vec_ptr;
+ command_response.sets_value_len = len;
+ command_response.response_type = ResponseType::Sets;
+ Ok(command_response)
+ }
+ // TODO: Add support for other return types.
+ _ => todo!(),
+ };
+ result
+}
+
+// TODO: Finish documentation
+/// Executes a command.
+///
+/// # Safety
+///
+/// * TODO: finish safety section.
+#[no_mangle]
+pub unsafe extern "C" fn command(
+ client_adapter_ptr: *const c_void,
+ channel: usize,
+ command_type: RequestType,
+ arg_count: c_ulong,
+ args: *const usize,
+ args_len: *const c_ulong,
+ route_bytes: *const u8,
+ route_bytes_len: usize,
+) {
+ let client_adapter =
+ unsafe { Box::leak(Box::from_raw(client_adapter_ptr as *mut ClientAdapter)) };
+ // The safety of this needs to be ensured by the calling code. Cannot dispose of the pointer before
+ // all operations have completed.
+ let ptr_address = client_adapter_ptr as usize;
+
+ let arg_vec =
+ unsafe { convert_double_pointer_to_vec(args as *const *const c_void, arg_count, args_len) };
+
+ let mut client_clone = client_adapter.client.clone();
+
+ // Create the command outside of the task to ensure that the command arguments passed
+ // from "go" are still valid
+ let mut cmd = command_type
+ .get_command()
+ .expect("Couldn't fetch command type");
+ for command_arg in arg_vec {
+ cmd.arg(command_arg);
+ }
+
+ let r_bytes = unsafe { std::slice::from_raw_parts(route_bytes, route_bytes_len) };
+
+ let route = Routes::parse_from_bytes(r_bytes).unwrap();
+
+ client_adapter.runtime.spawn(async move {
+ let result = client_clone
+ .send_command(&cmd, get_route(route, Some(&cmd)))
+ .await;
+ let client_adapter = unsafe { Box::leak(Box::from_raw(ptr_address as *mut ClientAdapter)) };
+ let value = match result {
+ Ok(value) => value,
+ Err(err) => {
+ let message = errors::error_message(&err);
+ let error_type = errors::error_type(&err);
+
+ let c_err_str = CString::into_raw(
+ CString::new(message).expect("Couldn't convert error message to CString"),
+ );
+ unsafe { (client_adapter.failure_callback)(channel, c_err_str, error_type) };
+ return;
+ }
+ };
+
+ let result: RedisResult = valkey_value_to_command_response(value);
+
+ unsafe {
+ match result {
+ Ok(message) => {
+ (client_adapter.success_callback)(channel, Box::into_raw(Box::new(message)))
+ }
+ Err(err) => {
+ let message = errors::error_message(&err);
+ let error_type = errors::error_type(&err);
+
+ let c_err_str = CString::into_raw(
+ CString::new(message).expect("Couldn't convert error message to CString"),
+ );
+ (client_adapter.failure_callback)(channel, c_err_str, error_type);
+ }
+ };
+ }
+ });
+}
+
+fn get_route(route: Routes, cmd: Option<&Cmd>) -> Option {
+ use glide_core::command_request::routes::Value;
+ let route = route.value?;
+ let get_response_policy = |cmd: Option<&Cmd>| {
+ cmd.and_then(|cmd| {
+ cmd.command()
+ .and_then(|cmd| ResponsePolicy::for_command(&cmd))
+ })
+ };
+ match route {
+ Value::SimpleRoutes(simple_route) => {
+ let simple_route = simple_route.enum_value().unwrap();
+ match simple_route {
+ SimpleRoutes::AllNodes => Some(RoutingInfo::MultiNode((
+ MultipleNodeRoutingInfo::AllNodes,
+ get_response_policy(cmd),
+ ))),
+ SimpleRoutes::AllPrimaries => Some(RoutingInfo::MultiNode((
+ MultipleNodeRoutingInfo::AllMasters,
+ get_response_policy(cmd),
+ ))),
+ SimpleRoutes::Random => {
+ Some(RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random))
+ }
+ }
+ }
+ Value::SlotKeyRoute(slot_key_route) => Some(RoutingInfo::SingleNode(
+ SingleNodeRoutingInfo::SpecificNode(Route::new(
+ redis::cluster_topology::get_slot(slot_key_route.slot_key.as_bytes()),
+ get_slot_addr(&slot_key_route.slot_type),
+ )),
+ )),
+ Value::SlotIdRoute(slot_id_route) => Some(RoutingInfo::SingleNode(
+ SingleNodeRoutingInfo::SpecificNode(Route::new(
+ slot_id_route.slot_id as u16,
+ get_slot_addr(&slot_id_route.slot_type),
+ )),
+ )),
+ Value::ByAddressRoute(by_address_route) => match u16::try_from(by_address_route.port) {
+ Ok(port) => Some(RoutingInfo::SingleNode(SingleNodeRoutingInfo::ByAddress {
+ host: by_address_route.host.to_string(),
+ port,
+ })),
+ Err(_) => {
+ // TODO: Handle error propagation.
+ None
+ }
+ },
+ _ => panic!("unknown route type"),
+ }
+}
+
+fn get_slot_addr(slot_type: &protobuf::EnumOrUnknown) -> SlotAddr {
+ slot_type
+ .enum_value()
+ .map(|slot_type| match slot_type {
+ SlotTypes::Primary => SlotAddr::Master,
+ SlotTypes::Replica => SlotAddr::ReplicaRequired,
+ })
+ .expect("Received unexpected slot id type")
+}
diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py
index 8f6ceac47b..7b3bcbb253 100644
--- a/python/python/glide/__init__.py
+++ b/python/python/glide/__init__.py
@@ -151,6 +151,7 @@
)
from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient
from glide.logger import Level as LogLevel
+from glide.glide_async_ffi_client import GlideAsync
from glide.logger import Logger
from glide.routes import (
AllNodes,
@@ -162,13 +163,17 @@
SlotKeyRoute,
SlotType,
)
-
+from glide.glide_sync_client import GlideSync
from .glide import ClusterScanCursor, Script
-
+from glide.glide_client_sync_uds import UDSGlideClientSync, UDSGlideClusterClientSync
PubSubMsg = CoreCommands.PubSubMsg
__all__ = [
# Client
+ "UDSGlideClientSync",
+ "UDSGlideClusterClientSync",
+ "GlideAsync",
+ "GlideSync",
"GlideClient",
"GlideClusterClient",
"Transaction",
diff --git a/python/python/glide/constants.py b/python/python/glide/constants.py
index 9740ac8cf6..34fdaacabe 100644
--- a/python/python/glide/constants.py
+++ b/python/python/glide/constants.py
@@ -6,7 +6,7 @@
from glide.protobuf.connection_request_pb2 import ConnectionRequest
from glide.routes import ByAddressRoute, RandomNode, SlotIdRoute, SlotKeyRoute
-OK: str = "OK"
# The canonical "OK" reply; bytes to match the byte-oriented response values.
# (The annotation previously said `str` while the value was bytes.)
OK: bytes = b"OK"
DEFAULT_READ_BYTES_SIZE: int = pow(2, 16)
# Typing
T = TypeVar("T")
diff --git a/python/python/glide/glide_async_ffi_client.py b/python/python/glide/glide_async_ffi_client.py
new file mode 100644
index 0000000000..2f2cbc031a
--- /dev/null
+++ b/python/python/glide/glide_async_ffi_client.py
@@ -0,0 +1,257 @@
+import asyncio
+from cffi import FFI
+from glide.protobuf.command_request_pb2 import Command, CommandRequest, RequestType
+from typing import List, Union, Optional
+from glide.async_commands.core import CoreCommands
+from glide.constants import DEFAULT_READ_BYTES_SIZE, OK, TEncodable, TRequest, TResult
+from glide.routes import Route
+from glide.config import GlideClusterClientConfiguration, NodeAddress, GlideClientConfiguration
+
class GlideAsync(CoreCommands):
    """Asyncio client that drives the Rust core directly through a C FFI
    boundary (via cffi) instead of the Unix-domain-socket protocol.

    NOTE(review): experimental code — debug prints, a hard-coded shared
    library path, and an ignored ``route`` parameter are flagged inline.
    """

    def create_client(self, connection_request_bytes):
        # Hand the serialized protobuf ConnectionRequest to the Rust core and
        # get back a pointer to a ConnectionResponse struct (owned by Rust;
        # must be released with lib.free_connection_response).
        request_len = len(connection_request_bytes)
        connection_response = self.lib.create_client(
            self.ffi.from_buffer(connection_request_bytes),
            request_len,
            self.success_callback,
            self.failure_callback
        )
        return connection_response

    def _get_callback_index(self) -> int:
        # Reuse a recycled callback index when one is available; otherwise
        # allocate the next unused slot.
        try:
            return self._available_callback_indexes.pop()
        except IndexError:
            # The list is empty
            return len(self._available_futures)

    def _get_future(self, callback_idx: int) -> asyncio.Future:
        # Register a Future that the success callback resolves for this index.
        response_future: asyncio.Future = asyncio.Future()
        self._available_futures.update({callback_idx: response_future})
        return response_future

    def _resolve_future(self, index_ptr, parsed_response):
        # Runs on the event-loop thread (scheduled via call_soon_threadsafe).
        future = self._available_futures.get(index_ptr)
        if future:
            future.set_result(parsed_response)

    def __init__(self, config = None):
        """Create the FFI bindings, register the callbacks, and connect.

        Args:
            config: Optional client configuration; defaults to a standalone
                client at localhost:6379.
        """
        self._init_ffi()
        self._available_callback_indexes: List[int] = list()
        self.loop = asyncio.get_event_loop()

        # Define success and failure callbacks.
        # NOTE(review): these cffi callbacks are presumably invoked from Rust
        # threads (hence the call_soon_threadsafe hop back onto the loop) —
        # confirm against the Rust side.
        @self.ffi.callback("void(size_t, const CommandResponse*)")
        def success_callback(index_ptr, message):
            if message == self.ffi.NULL:
                print("No message provided in success callback.")
            else:
                parsed_response = self._handle_response(message)
                index_ptr = int(index_ptr)  # Ensure index_ptr is an integer
                future = self._available_futures.get(index_ptr)
                if future:
                    self.loop.call_soon_threadsafe(
                        self._resolve_future, index_ptr, parsed_response
                    )
                else:
                    print(f"No future found for index: {index_ptr}")


        @self.ffi.callback("void(size_t, const char*, int)")
        def failure_callback(index_ptr, error_message, error_type):
            # NOTE(review): errors are only printed here; the pending future
            # is never resolved, so an awaiting caller would hang — confirm
            # whether this is intended.
            error_msg = self.ffi.string(error_message).decode("utf-8") if error_message != self.ffi.NULL else "Unknown Error"
            print(f"Failure callback called with index: {index_ptr}, error: {error_msg}, type: {error_type}")

        # Keep references so the callback trampolines are not garbage
        # collected while the Rust side still holds the function pointers.
        self.success_callback = success_callback
        self.failure_callback = failure_callback
        # Call the `create_client` function
        # config = GlideClusterClientConfiguration(NodeAddress("localhost", 6379))
        config = GlideClientConfiguration([NodeAddress("localhost", 6379)]) if config is None else config
        conn_req = config._create_a_protobuf_conn_request(cluster_mode=type(config) == GlideClusterClientConfiguration)
        conn_req_bytes = conn_req.SerializeToString()
        client_response_ptr = self.create_client(conn_req_bytes)
        # Handle the connection response
        if client_response_ptr != self.ffi.NULL:
            client_response = self.ffi.cast("ConnectionResponse*", client_response_ptr)
            if client_response.conn_ptr != self.ffi.NULL:
                print("Client created successfully.")
                self.core_client = client_response.conn_ptr
            else:
                error_message = self.ffi.string(client_response.connection_error_message).decode('utf-8') if client_response.connection_error_message != self.ffi.NULL else "Unknown error"
                print(f"Failed to create client. Error: {error_message}")

            # Free the connection response to avoid memory leaks
            self.lib.free_connection_response(client_response_ptr)
        else:
            print("Failed to create client, response pointer is NULL.")
        # NOTE(review): initialized only after create_client, yet the
        # callbacks read this dict — a response arriving before this line
        # would hit an AttributeError. Consider assigning it earlier.
        self._available_futures = {}

    def _init_ffi(self):
        """Declare the C ABI (structs + entry points) and load the Rust core."""
        self.ffi = FFI()

        # Define the CommandResponse struct and related types.
        # These declarations must stay in sync with the Rust definitions.
        self.ffi.cdef("""
        typedef struct CommandResponse {
            int response_type;
            long int_value;
            double float_value;
            bool bool_value;
            char* string_value;
            long string_value_len;
            struct CommandResponse* array_value;
            long array_value_len;
            struct CommandResponse* map_key;
            struct CommandResponse* map_value;
            struct CommandResponse* sets_value;
            long sets_value_len;
        } CommandResponse;

        typedef struct ConnectionResponse {
            const void* conn_ptr;
            const char* connection_error_message;
        } ConnectionResponse;

        typedef void (*SuccessCallback)(size_t index_ptr, const CommandResponse* message);
        typedef void (*FailureCallback)(size_t index_ptr, const char* error_message, int error_type);

        const ConnectionResponse* create_client(
            const uint8_t* connection_request_bytes,
            size_t connection_request_len,
            SuccessCallback success_callback,
            FailureCallback failure_callback
        );

        void free_command_response(CommandResponse* response);
        void free_connection_response(ConnectionResponse* response);

        void command(
            const void* client_adapter_ptr,
            size_t channel,
            int command_type,
            unsigned long arg_count,
            const size_t* args,
            const unsigned long* args_len,
            const uint8_t* route_bytes,
            size_t route_bytes_len
        );
        """)

        # Load the shared library (adjust the path to your compiled Rust library)
        # NOTE(review): hard-coded absolute path — breaks on any other
        # machine. Should be resolved relative to the package or be
        # configurable.
        self.lib = self.ffi.dlopen("/home/ubuntu/glide-for-redis/go/target/release/libglide_rs.so")
        # debug
        # self.lib = self.ffi.dlopen("/home/ubuntu/glide-for-redis/go/target/debug/libglide_rs.so")


    def _handle_response(self, message):
        """Recursively convert a CommandResponse cdata into a Python value.

        Mapping (by response_type tag): 0 -> None, 1 -> int, 2 -> float,
        3 -> bool, 4 -> bytes, 5 -> list, 6 -> dict, 7 -> set.
        """
        if message == self.ffi.NULL:
            print("Received NULL message.")
            return None

        # Identify the type of the message
        message_type = self.ffi.typeof(message).cname

        # If message is a pointer to CommandResponse, dereference it
        if message_type == "CommandResponse *":
            message = message[0]  # Dereference the pointer
            message_type = self.ffi.typeof(message).cname
        # Check if message is now a CommandResponse
        if message_type == "CommandResponse":
            msg = message
            if msg.response_type == 0:  # Null
                return None
            elif msg.response_type == 1:  # Int
                return msg.int_value
            elif msg.response_type == 2:  # Float
                return msg.float_value
            elif msg.response_type == 3:  # Bool
                return bool(msg.bool_value)
            elif msg.response_type == 4:  # String
                try:
                    # Copy the bytes out of Rust-owned memory.
                    string_value = self.ffi.buffer(msg.string_value, msg.string_value_len)[:]
                    return string_value
                except Exception as e:
                    # NOTE(review): falls through to an implicit None return.
                    print(f"Error decoding string value: {e}")
            elif msg.response_type == 5:  # Array
                array = []
                for i in range(msg.array_value_len):
                    element = self.ffi.cast("struct CommandResponse*", msg.array_value + i)
                    array.append(self._handle_response(element))
                return array
            elif msg.response_type == 6:  # Map
                # Each entry's key/value live behind map_key/map_value pointers.
                map_dict = {}
                for i in range(msg.array_value_len):
                    key = self.ffi.cast("struct CommandResponse*", msg.map_key + i)
                    value = self.ffi.cast("struct CommandResponse*", msg.map_value + i)
                    map_dict[self._handle_response(key)] = self._handle_response(value)
                return map_dict
            elif msg.response_type == 7:  # Sets
                result_set = set()
                sets_array = self.ffi.cast(f"struct CommandResponse[{msg.sets_value_len}]", msg.sets_value)
                for i in range(msg.sets_value_len):
                    element = sets_array[i]  # Already a struct
                    result_set.add(self._handle_response(element))
                return result_set
            else:
                print(f"Unhandled response type = {msg.response_type}")
                return None
        else:
            print(f"Unexpected message type: {message_type}")
            return None

    def _to_c_strings(self, args):
        """Convert Python arguments to C-compatible pointers and lengths."""
        # NOTE(review): bytes arguments are rejected by the else branch even
        # though TEncodable includes bytes — confirm whether that is intended.
        c_strings = []
        string_lengths = []
        buffers = []  # Keep a reference to prevent premature garbage collection

        for arg in args:
            if isinstance(arg, str):
                # Convert string to UTF-8 bytes
                arg_bytes = arg.encode('utf-8')
            elif isinstance(arg, (int, float)):
                # Convert numeric values to strings and then to bytes
                arg_bytes = str(arg).encode('utf-8')
            else:
                raise ValueError(f"Unsupported argument type: {type(arg)}")

            # Use ffi.from_buffer for zero-copy conversion
            buffers.append(arg_bytes)  # Keep the byte buffer alive
            c_strings.append(self.ffi.cast("size_t", self.ffi.from_buffer(arg_bytes)))
            string_lengths.append(len(arg_bytes))

        # Return C-compatible arrays and keep buffers alive
        return (
            self.ffi.new("size_t[]", c_strings),
            self.ffi.new("unsigned long[]", string_lengths),
            buffers,  # Ensure buffers stay alive
        )

    async def _execute_command(
        self,
        request_type: RequestType.ValueType,
        args: List[TEncodable],
        route: Optional[Route] = None,
    ) -> TResult:
        """Send a single command through the FFI and await its response.

        NOTE(review): the ``route`` parameter is accepted but ignored — an
        empty route payload is always sent. TODO: serialize ``route``.
        """
        client_adapter_ptr = self.core_client
        if client_adapter_ptr == self.ffi.NULL:
            raise ValueError("Invalid client pointer.")
        callback_idx = self._get_callback_index()
        response_future = self._get_future(callback_idx)
        # Convert the arguments to C-compatible pointers; `buffers` must stay
        # referenced until the call returns so the pointers remain valid.
        c_args, c_lengths, buffers = self._to_c_strings(args)
        route_bytes = b""
        c_route_bytes = self.ffi.from_buffer(route_bytes)
        # Call the command function
        self.lib.command(
            client_adapter_ptr,  # Client pointer
            callback_idx,  # Example channel (adjust as needed)
            request_type,  # Request type (e.g., GET or SET)
            len(args),  # Number of arguments
            c_args,  # Array of argument pointers
            c_lengths,  # Array of argument lengths
            c_route_bytes,
            len(route_bytes)
        )
        await response_future
        return response_future.result()
diff --git a/python/python/glide/glide_client_sync_uds.py b/python/python/glide/glide_client_sync_uds.py
new file mode 100644
index 0000000000..5b053bc8cf
--- /dev/null
+++ b/python/python/glide/glide_client_sync_uds.py
@@ -0,0 +1,600 @@
+# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+
+import asyncio
+import sys
+import threading
+from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
+import socket
+import time
+
+from glide.async_commands.cluster_commands import ClusterCommands
+from glide.async_commands.command_args import ObjectType
+from glide.sync_commands.core import CoreCommands
+from glide.async_commands.standalone_commands import StandaloneCommands
+from glide.config import BaseClientConfiguration, ServerCredentials
+from glide.constants import DEFAULT_READ_BYTES_SIZE, OK, TEncodable, TRequest, TResult
+from glide.exceptions import (
+ ClosingError,
+ ConfigurationError,
+ ConnectionError,
+ ExecAbortError,
+ RequestError,
+ TimeoutError,
+)
+from glide.logger import Level as LogLevel
+from glide.logger import Logger as ClientLogger
+from glide.protobuf.command_request_pb2 import Command, CommandRequest, RequestType
+from glide.protobuf.connection_request_pb2 import ConnectionRequest
+from glide.protobuf.response_pb2 import RequestErrorType, Response
+from glide.protobuf_codec import PartialMessageException, ProtobufCodec
+from glide.routes import Route, set_protobuf_route
+
+from .glide import (
+ DEFAULT_TIMEOUT_IN_MILLISECONDS,
+ MAX_REQUEST_ARGS_LEN,
+ ClusterScanCursor,
+ create_leaked_bytes_vec,
+ get_statistics,
+ start_socket_listener_external,
+ value_from_pointer,
+)
+
+if sys.version_info >= (3, 11):
+ import asyncio as async_timeout
+ from typing import Self
+else:
+ import async_timeout
+ from typing_extensions import Self
+
+
def get_request_error_class(
    error_type: Optional[RequestErrorType.ValueType],
) -> Type[RequestError]:
    """Map a protobuf request error type to the exception class raised to users.

    Unknown or missing error types fall back to the generic RequestError.
    """
    error_class_by_type = {
        RequestErrorType.Disconnect: ConnectionError,
        RequestErrorType.ExecAbort: ExecAbortError,
        RequestErrorType.Timeout: TimeoutError,
        RequestErrorType.Unspecified: RequestError,
    }
    return error_class_by_type.get(error_type, RequestError)
+
+
+class UDSBaseClient(CoreCommands):
    def __init__(self, config: BaseClientConfiguration):
        """
        To create a new client, use the `create` classmethod
        """
        self.config: BaseClientConfiguration = config
        # Maps callback index -> future awaiting the response for that request.
        self._available_futures: Dict[int, asyncio.Future] = {}
        # Recycled callback indexes, reused before allocating new ones.
        self._available_callback_indexes: List[int] = list()
        # Requests queued while another thread holds the writer lock.
        self._buffered_requests: List[TRequest] = list()
        self._writer_lock = threading.Lock()
        # Filled in by `create` once the Rust socket listener reports its path.
        self.socket_path: Optional[str] = None
        self._reader_task: Optional[asyncio.Task] = None
        self._is_closed: bool = False
        # Futures handed out by get_pubsub_message, resolved in FIFO order.
        self._pubsub_futures: List[asyncio.Future] = []
        self._pubsub_lock = threading.Lock()
        # Push notifications received but not yet delivered to a consumer.
        self._pending_push_notifications: List[Response] = list()
+
    @classmethod
    def create(cls, config: BaseClientConfiguration) -> Self:
        """Creates a Glide client.

        Args:
            config (ClientConfiguration): The client configurations.
                If no configuration is provided, a default client to "localhost":6379 will be created.

        Returns:
            Self: a Glide Client instance.
        """
        config = config  # NOTE(review): no-op assignment — candidate for removal.
        self = cls(config)

        def init_callback(socket_path: Optional[str], err: Optional[str]):
            # Invoked by the Rust socket listener once it is up (or failed).
            nonlocal self
            if err is not None:
                raise ClosingError(err)
            elif socket_path is None:
                raise ClosingError(
                    "Socket initialization error: Missing valid socket path."
                )
            else:
                # Received socket path
                print("caliing notify")
                self.socket_path = socket_path
        # NOTE(review): the return value is used directly as the socket path,
        # while init_callback also sets it — confirm which mechanism is the
        # intended one.
        self.socket_path = start_socket_listener_external(init_callback=init_callback)

        # will log if the logger was created (wrapper or costumer) on info
        # level or higher
        # Wait for the socket listener to complete its initialization.
        # NOTE(review): unbounded busy-retry loop with 1s sleeps and debug
        # prints — consider a bounded timeout and proper logging.
        while True:  # Loop to handle spurious wakeups
            ClientLogger.log(LogLevel.INFO, "connection info", "new connection established")
            # Create UDS connection
            try:
                self._create_uds_connection()
                print("socket is ready!")
            except Exception as e:
                print(f"got ex: {e}")
                time.sleep(1)
                continue
            # Set the client configurations
            self._set_connection_configurations()
            return self
+
    def _create_uds_connection(self) -> None:
        """Open a blocking Unix-domain-socket connection to ``self.socket_path``.

        Raises:
            RuntimeError: If the connection cannot be established; the client
                is closed first.
        """
        try:
            # Open a UDS connection
            self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            # NOTE(review): socket.settimeout() takes SECONDS, but the
            # constant name says milliseconds — confirm the intended unit.
            self._socket.settimeout(DEFAULT_TIMEOUT_IN_MILLISECONDS)
            self._socket.connect(self.socket_path)
            print("UDS connection created successfully.")
        except Exception as e:
            self.close()
            raise RuntimeError(f"Failed to create UDS connection: {e}") from e
+
+ def __del__(self) -> None:
+ try:
+ if self._reader_task:
+ self._reader_task.cancel()
+ except RuntimeError as e:
+ if "no running event loop" in str(e):
+ # event loop already closed
+ pass
+
    def close(self, err_message: Optional[str] = None) -> None:
        """
        Terminate the client by closing all associated resources, including the socket and any active futures.
        All open futures will be closed with an exception.

        Args:
            err_message (Optional[str]): If not None, this error message will be passed along with the exceptions when closing all open futures.
                Defaults to None.
        """
        self._is_closed = True
        # Fail every request still waiting for a response.
        for response_future in self._available_futures.values():
            if not response_future.done():
                err_message = "" if err_message is None else err_message
                response_future.set_exception(ClosingError(err_message))
        try:
            self._pubsub_lock.acquire()
            # Fail pubsub waiters too; done/cancelled futures are left alone.
            for pubsub_future in self._pubsub_futures:
                if not pubsub_future.done() and not pubsub_future.cancelled():
                    pubsub_future.set_exception(ClosingError(""))
        finally:
            self._pubsub_lock.release()

        self._socket.close()
        # NOTE(review): calling __del__ explicitly is unusual (it will run
        # again on GC); it only cancels the reader task, so this is likely
        # harmless — confirm.
        self.__del__()
+
+ def _get_future(self, callback_idx: int) -> asyncio.Future:
+ response_future: asyncio.Future = asyncio.Future()
+ self._available_futures.update({callback_idx: response_future})
+ return response_future
+
    def _get_protobuf_conn_request(self) -> ConnectionRequest:
        # Delegate to the configuration object to build the handshake message.
        return self.config._create_a_protobuf_conn_request()
+
+ def _set_connection_configurations(self) -> None:
+ conn_request = self._get_protobuf_conn_request()
+ print(f"conn_request= {conn_request}")
+ self._write_or_buffer_request(conn_request)
+ result = self._read_response()
+ if result is not OK:
+ raise ClosingError(result)
+
    def _create_write_task(self, request: TRequest):
        # In this synchronous client the "task" is just an inline write;
        # the name presumably mirrors the async client's API — confirm.
        self._write_or_buffer_request(request)
+
+ def _write_or_buffer_request(self, request: TRequest):
+ self._buffered_requests.append(request)
+ if self._writer_lock.acquire(False):
+ try:
+ while len(self._buffered_requests) > 0:
+ return self._write_buffered_requests_to_socket()
+
+ finally:
+ self._writer_lock.release()
+
+ def _write_buffered_requests_to_socket(self) -> None:
+ requests = self._buffered_requests
+ self._buffered_requests = list()
+ b_arr = bytearray()
+ for request in requests:
+ ProtobufCodec.encode_delimited(b_arr, request)
+ self._socket.sendall(b_arr)
+
+ def _encode_arg(self, arg: TEncodable) -> bytes:
+ """
+ Converts a string argument to bytes.
+
+ Args:
+ arg (str): An encodable argument.
+
+ Returns:
+ bytes: The encoded argument as bytes.
+ """
+ if isinstance(arg, str):
+ # TODO: Allow passing different encoding options
+ return bytes(arg, encoding="utf8")
+ return arg
+
+ def _encode_and_sum_size(
+ self,
+ args_list: Optional[List[TEncodable]],
+ ) -> Tuple[List[bytes], int]:
+ """
+ Encodes the list and calculates the total memory size.
+
+ Args:
+ args_list (Optional[List[TEncodable]]): A list of strings to be converted to bytes.
+ If None or empty, returns ([], 0).
+
+ Returns:
+ int: The total memory size of the encoded arguments in bytes.
+ """
+ args_size = 0
+ encoded_args_list: List[bytes] = []
+ if not args_list:
+ return (encoded_args_list, args_size)
+ for arg in args_list:
+ encoded_arg = self._encode_arg(arg) if isinstance(arg, str) else arg
+ encoded_args_list.append(encoded_arg)
+ args_size += sys.getsizeof(encoded_arg)
+ return (encoded_args_list, args_size)
+
+ def _execute_command(
+ self,
+ request_type: RequestType.ValueType,
+ args: List[TEncodable],
+ route: Optional[Route] = None,
+ ) -> TResult:
+ if self._is_closed:
+ raise ClosingError(
+ "Unable to execute requests; the client is closed. Please create a new client."
+ )
+ request = CommandRequest()
+ request.callback_idx = self._get_callback_index()
+ request.single_command.request_type = request_type
+ request.single_command.args_array.args[:] = [
+ bytes(elem, encoding="utf8") if isinstance(elem, str) else elem
+ for elem in args
+ ]
+ (encoded_args, args_size) = self._encode_and_sum_size(args)
+ if args_size < MAX_REQUEST_ARGS_LEN:
+ request.single_command.args_array.args[:] = encoded_args
+ else:
+ request.single_command.args_vec_pointer = create_leaked_bytes_vec(
+ encoded_args
+ )
+ set_protobuf_route(request, route)
+ return self._write_request_await_response(request)
+
+ async def _execute_transaction(
+ self,
+ commands: List[Tuple[RequestType.ValueType, List[TEncodable]]],
+ route: Optional[Route] = None,
+ ) -> List[TResult]:
+ if self._is_closed:
+ raise ClosingError(
+ "Unable to execute requests; the client is closed. Please create a new client."
+ )
+ request = CommandRequest()
+ request.callback_idx = self._get_callback_index()
+ transaction_commands = []
+ for requst_type, args in commands:
+ command = Command()
+ command.request_type = requst_type
+ # For now, we allow the user to pass the command as array of strings
+ # we convert them here into bytes (the datatype that our rust core expects)
+ (encoded_args, args_size) = self._encode_and_sum_size(args)
+ if args_size < MAX_REQUEST_ARGS_LEN:
+ command.args_array.args[:] = encoded_args
+ else:
+ command.args_vec_pointer = create_leaked_bytes_vec(encoded_args)
+ transaction_commands.append(command)
+ request.transaction.commands.extend(transaction_commands)
+ set_protobuf_route(request, route)
+ return self._write_request_await_response(request)
+
    async def _execute_script(
        self,
        hash: str,
        keys: Optional[List[Union[str, bytes]]] = None,
        args: Optional[List[Union[str, bytes]]] = None,
        route: Optional[Route] = None,
    ) -> TResult:
        """Invoke a server-side script by its SHA1 hash with optional keys/args.

        Raises:
            ClosingError: If the client has been closed.
        """
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        (encoded_keys, keys_size) = self._encode_and_sum_size(keys)
        (encoded_args, args_size) = self._encode_and_sum_size(args)
        if (keys_size + args_size) < MAX_REQUEST_ARGS_LEN:
            # Small payload: inline keys/args in the protobuf message.
            request.script_invocation.hash = hash
            request.script_invocation.keys[:] = encoded_keys
            request.script_invocation.args[:] = encoded_args

        else:
            # Large payload: leak the buffers and pass pointers instead.
            request.script_invocation_pointers.hash = hash
            request.script_invocation_pointers.keys_pointer = create_leaked_bytes_vec(
                encoded_keys
            )
            request.script_invocation_pointers.args_pointer = create_leaked_bytes_vec(
                encoded_args
            )
        set_protobuf_route(request, route)
        return self._write_request_await_response(request)
+
    async def get_pubsub_message(self) -> CoreCommands.PubSubMsg:
        """Wait for and return the next pubsub message.

        Raises:
            ClosingError: If the client has been closed.
            ConfigurationError: If pubsub is not configured, or a callback is
                configured (messages then never reach this method).
        """
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )

        if not self.config._is_pubsub_configured():
            raise ConfigurationError(
                "The operation will never complete since there was no pubsub subscriptions applied to the client."
            )

        if self.config._get_pubsub_callback_and_context()[0] is not None:
            raise ConfigurationError(
                "The operation will never complete since messages will be passed to the configured callback."
            )

        # locking might not be required
        response_future: asyncio.Future = asyncio.Future()
        try:
            self._pubsub_lock.acquire()
            self._pubsub_futures.append(response_future)
            # Resolve immediately if a notification is already pending.
            self._complete_pubsub_futures_safe()
        finally:
            self._pubsub_lock.release()
        return await response_future
+
+ def try_get_pubsub_message(self) -> Optional[CoreCommands.PubSubMsg]:
+ if self._is_closed:
+ raise ClosingError(
+ "Unable to execute requests; the client is closed. Please create a new client."
+ )
+
+ if not self.config._is_pubsub_configured():
+ raise ConfigurationError(
+ "The operation will never succeed since there was no pubsbub subscriptions applied to the client."
+ )
+
+ if self.config._get_pubsub_callback_and_context()[0] is not None:
+ raise ConfigurationError(
+ "The operation will never succeed since messages will be passed to the configured callback."
+ )
+
+ # locking might not be required
+ msg: Optional[CoreCommands.PubSubMsg] = None
+ try:
+ self._pubsub_lock.acquire()
+ self._complete_pubsub_futures_safe()
+ while len(self._pending_push_notifications) and not msg:
+ push_notification = self._pending_push_notifications.pop(0)
+ msg = self._notification_to_pubsub_message_safe(push_notification)
+ finally:
+ self._pubsub_lock.release()
+ return msg
+
+ def _cancel_pubsub_futures_with_exception_safe(self, exception: ConnectionError):
+ while len(self._pubsub_futures):
+ next_future = self._pubsub_futures.pop(0)
+ if not next_future.cancelled():
+ next_future.set_exception(exception)
+
    def _notification_to_pubsub_message_safe(
        self, response: Response
    ) -> Optional[CoreCommands.PubSubMsg]:
        """Translate a raw push notification into a PubSubMsg.

        Returns None for notifications without a user-visible payload
        (subscribe/unsubscribe confirmations, disconnects, unknown kinds).
        Presumably called under _pubsub_lock, like the other *_safe helpers.
        """
        pubsub_message = None
        push_notification = cast(
            Dict[str, Any], value_from_pointer(response.resp_pointer)
        )
        message_kind = push_notification["kind"]
        if message_kind == "Disconnection":
            ClientLogger.log(
                LogLevel.WARN,
                "disconnect notification",
                "Transport disconnected, messages might be lost",
            )
        elif (
            message_kind == "Message"
            or message_kind == "PMessage"
            or message_kind == "SMessage"
        ):
            values: List = push_notification["values"]
            if message_kind == "PMessage":
                # Pattern messages carry [pattern, channel, payload].
                pubsub_message = UDSBaseClient.PubSubMsg(
                    message=values[2], channel=values[1], pattern=values[0]
                )
            else:
                # Plain/sharded messages carry [channel, payload].
                pubsub_message = UDSBaseClient.PubSubMsg(
                    message=values[1], channel=values[0], pattern=None
                )
        elif (
            message_kind == "PSubscribe"
            or message_kind == "Subscribe"
            or message_kind == "SSubscribe"
            or message_kind == "Unsubscribe"
            or message_kind == "PUnsubscribe"
            or message_kind == "SUnsubscribe"
        ):
            # Subscription state changes carry no message for the user.
            pass
        else:
            ClientLogger.log(
                LogLevel.WARN,
                "unknown notification",
                f"Unknown notification message: '{message_kind}'",
            )

        return pubsub_message
+
    def _complete_pubsub_futures_safe(self):
        # Pair queued push notifications with waiting futures in FIFO order.
        # Notifications that translate to None (e.g. subscribe acks) are
        # dropped without consuming a future.
        # Presumably called under _pubsub_lock, like the other *_safe helpers.
        while len(self._pending_push_notifications) and len(self._pubsub_futures):
            next_push_notification = self._pending_push_notifications.pop(0)
            pubsub_message = self._notification_to_pubsub_message_safe(
                next_push_notification
            )
            if pubsub_message:
                self._pubsub_futures.pop(0).set_result(pubsub_message)
+
+ def _write_request_await_response(self, request: CommandRequest):
+ # Create a response future for this request and add it to the available
+ # futures map
+ self._create_write_task(request)
+ return self._read_response()
+
+ def _get_callback_index(self) -> int:
+ try:
+ return self._available_callback_indexes.pop()
+ except IndexError:
+ # The list is empty
+ return len(self._available_futures)
+
+ def _process_response(self, response: Response) -> None:
+ if response.HasField("closing_error"):
+ err_msg = (
+ response.closing_error
+ if response.HasField("closing_error")
+ else f"Client Error - closing due to unknown error. callback index: {response.callback_idx}"
+ )
+ self.close(err_msg)
+ raise ClosingError(err_msg)
+ else:
+ self._available_callback_indexes.append(response.callback_idx)
+ if response.HasField("request_error"):
+ error_type = get_request_error_class(response.request_error.type)
+ raise error_type(response.request_error.message)
+ elif response.HasField("resp_pointer"):
+ return value_from_pointer(response.resp_pointer)
+ elif response.HasField("constant_response"):
+ return OK
+ else:
+ return None
+
+ def _process_push(self, response: Response) -> None:
+ if response.HasField("closing_error") or not response.HasField("resp_pointer"):
+ err_msg = (
+ response.closing_error
+ if response.HasField("closing_error")
+ else "Client Error - push notification without resp_pointer"
+ )
+ self.close(err_msg)
+ raise ClosingError(err_msg)
+
+ try:
+ self._pubsub_lock.acquire()
+ callback, context = self.config._get_pubsub_callback_and_context()
+ if callback:
+ pubsub_message = self._notification_to_pubsub_message_safe(response)
+ if pubsub_message:
+ callback(pubsub_message, context)
+ else:
+ self._pending_push_notifications.append(response)
+ self._complete_pubsub_futures_safe()
+ finally:
+ self._pubsub_lock.release()
+
    def _read_response(self) -> TResult:
        # Socket reader loop: keep receiving until one complete,
        # length-delimited protobuf Response has been decoded, then hand it
        # to the push or response processor and return its result.
        remaining_read_bytes = bytearray()
        while True:
            read_bytes = self._socket.recv(DEFAULT_READ_BYTES_SIZE)
            if len(read_bytes) == 0:
                # recv() returning b"" means the peer closed the socket.
                err_msg = "The communication layer was unexpectedly closed."
                self.close(err_msg)
                raise ClosingError(err_msg)
            # Prepend any partial message left over from the previous recv().
            read_bytes = remaining_read_bytes + bytearray(read_bytes)
            read_bytes_view = memoryview(read_bytes)
            offset = 0
            while offset <= len(read_bytes):
                try:
                    response, offset = ProtobufCodec.decode_delimited(
                        read_bytes, read_bytes_view, offset, Response
                    )
                except PartialMessageException:
                    # Received only partial response, break the inner loop
                    remaining_read_bytes = read_bytes[offset:]
                    break
                response = cast(Response, response)
                # NOTE(review): returning here discards any bytes received
                # after this message (`remaining_read_bytes` is a local, not
                # instance state), so a second response arriving in the same
                # recv() would be lost. Confirm the protocol is strictly one
                # response per call on this socket.
                if response.is_push:
                    return self._process_push(response=response)
                else:
                    return self._process_response(response=response)
+
    async def get_statistics(self) -> dict:
        """Return internal glide-core statistics as a dict.

        NOTE(review): declared `async` although this client appears to be the
        synchronous (UDS) flavor — confirm whether this should be a plain
        `def` like the rest of the sync surface.
        """
        return get_statistics()
+
    async def _update_connection_password(
        self, password: Optional[str], immediate_auth: bool
    ) -> TResult:
        """Update the password used by the core connection.

        Args:
            password: the new password, or None to clear it (when None the
                request is sent with the password field unset).
            immediate_auth: forwarded to the core; when True the core is asked
                to re-authenticate immediately.

        Returns:
            The server response; OK on success.

        NOTE(review): declared `async` although this client appears to be the
        synchronous (UDS) flavor — confirm whether it should be a plain `def`.
        """
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        if password is not None:
            request.update_connection_password.password = password
        request.update_connection_password.immediate_auth = immediate_auth
        response = self._write_request_await_response(request)
        # Update the client binding side password if managed to change core configuration password
        if response is OK:
            if self.config.credentials is None:
                self.config.credentials = ServerCredentials(password=password or "")
            self.config.credentials.password = password or ""
        return response
+
+
class UDSGlideClusterClientSync(UDSBaseClient, ClusterCommands):
    """
    Client used for connection to cluster servers.
    For full documentation, see
    https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#cluster
    """

    async def _cluster_scan(
        self,
        cursor: ClusterScanCursor,
        match: Optional[TEncodable] = None,
        count: Optional[int] = None,
        type: Optional[ObjectType] = None,
        allow_non_covered_slots: bool = False,
    ) -> List[Union[ClusterScanCursor, List[bytes]]]:
        """Run one step of a cluster-wide SCAN; returns [new_cursor, keys]."""
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        # The wire format carries the raw cursor id, not the wrapper object.
        request.cluster_scan.cursor = cursor.get_cursor()
        request.cluster_scan.allow_non_covered_slots = allow_non_covered_slots
        if match is not None:
            pattern = self._encode_arg(match) if isinstance(match, str) else match
            request.cluster_scan.match_pattern = pattern
        if count is not None:
            request.cluster_scan.count = count
        if type is not None:
            request.cluster_scan.object_type = type.value
        response = self._write_request_await_response(request)
        new_cursor = ClusterScanCursor(bytes(response[0]).decode())
        return [new_cursor, response[1]]

    def _get_protobuf_conn_request(self) -> ConnectionRequest:
        """Build the connection request with cluster mode enabled."""
        return self.config._create_a_protobuf_conn_request(cluster_mode=True)
+
+
class UDSGlideClientSync(UDSBaseClient, StandaloneCommands):
    """
    Client used for connection to standalone servers.
    For full documentation, see
    https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#standalone
    """

    # All behavior is inherited: request plumbing from UDSBaseClient and the
    # standalone command surface from StandaloneCommands.
+
+
# Union of the two sync client flavors; use in hints that accept either one.
TGlideClient = Union[UDSGlideClientSync, UDSGlideClusterClientSync]
diff --git a/python/python/glide/glide_sync_client.py b/python/python/glide/glide_sync_client.py
new file mode 100644
index 0000000000..fe159321ba
--- /dev/null
+++ b/python/python/glide/glide_sync_client.py
@@ -0,0 +1,177 @@
+from cffi import FFI
+from glide import GlideClusterClientConfiguration
+from glide.protobuf.command_request_pb2 import Command, CommandRequest, RequestType
+from typing import List, Union, Optional
+from glide.sync_commands.core import CoreCommands
+from glide.constants import DEFAULT_READ_BYTES_SIZE, OK, TEncodable, TRequest, TResult
+from glide.routes import Route
+
+
class GlideSync(CoreCommands):
    """Synchronous Glide client backed by the Rust core via cffi.

    The client serializes a protobuf ``ConnectionRequest``, hands it to the
    native ``create_client`` entry point, then issues commands through the
    native ``command`` function, converting ``CommandResponse`` structs back
    into Python values.
    """

    def __init__(self, config):
        """Create a client from a standalone or cluster configuration.

        Args:
            config: a Glide client configuration object; cluster mode is
                enabled when it is a GlideClusterClientConfiguration.

        Raises:
            RuntimeError: if the native layer fails to create the client.
                (The original printed the error and continued with no
                ``core_client``, which crashed later with AttributeError.)
        """
        self._init_ffi()
        # isinstance (not type ==) so configuration subclasses also work.
        cluster_mode = isinstance(config, GlideClusterClientConfiguration)
        conn_req = config._create_a_protobuf_conn_request(cluster_mode=cluster_mode)
        conn_req_bytes = conn_req.SerializeToString()
        client_response_ptr = self.lib.create_client(conn_req_bytes, len(conn_req_bytes))
        if client_response_ptr == self.ffi.NULL:
            raise RuntimeError("Failed to create client, response pointer is NULL.")
        client_response = self.ffi.cast("ConnectionResponse*", client_response_ptr)
        try:
            if client_response.conn_ptr == self.ffi.NULL:
                error_message = (
                    self.ffi.string(client_response.connection_error_message).decode("utf-8")
                    if client_response.connection_error_message != self.ffi.NULL
                    else "Unknown error"
                )
                raise RuntimeError(f"Failed to create client. Error: {error_message}")
            self.core_client = client_response.conn_ptr
        finally:
            # Free the connection response to avoid memory leaks.
            self.lib.free_connection_response(client_response_ptr)

    def _init_ffi(self):
        """Declare the native ABI and load the glide core shared library."""
        import os

        self.ffi = FFI()

        # Mirror of the CommandResponse / ConnectionResponse ABI in lib.h.
        self.ffi.cdef("""
            typedef struct CommandResponse {
                int response_type;
                long int_value;
                double float_value;
                bool bool_value;
                char* string_value;
                long string_value_len;
                struct CommandResponse* array_value;
                long array_value_len;
                struct CommandResponse* map_key;
                struct CommandResponse* map_value;
                struct CommandResponse* sets_value;
                long sets_value_len;
            } CommandResponse;

            typedef struct ConnectionResponse {
                const void* conn_ptr;
                const char* connection_error_message;
            } ConnectionResponse;

            const ConnectionResponse* create_client(const uint8_t* connection_request_bytes, size_t connection_request_len);
            void free_command_response(CommandResponse* response);
            void free_connection_response(ConnectionResponse* response);

            CommandResponse* command(
                const void *client_adapter_ptr,
                size_t channel,
                int command_type,
                unsigned long arg_count,
                const size_t *args,
                const unsigned long *args_len
            );
        """)

        # The library location is overridable for real deployments; the
        # original hardcoded development path remains the fallback so
        # existing setups keep working.
        lib_path = os.environ.get(
            "GLIDE_LIB_PATH",
            "/home/ubuntu/glide-for-redis/go/target/debug/libglide_rs.so",
        )
        self.lib = self.ffi.dlopen(lib_path)

    def _handle_response(self, message):
        """Convert a native CommandResponse (pointer or struct) to a Python value.

        Copies all data out of native memory (ints/floats by value, strings
        via ffi.buffer[:]), so the caller may free the struct afterwards.
        Unknown response types are reported and yield None (best effort).
        """
        if message == self.ffi.NULL:
            print("Received NULL message.")
            return None

        message_type = self.ffi.typeof(message).cname
        # If message is a pointer to CommandResponse, dereference it first.
        if message_type == "CommandResponse *":
            message = message[0]
            message_type = self.ffi.typeof(message).cname

        if message_type != "CommandResponse":
            print(f"Unexpected message type: {message_type}")
            return None

        msg = message
        if msg.response_type == 0:  # Null
            return None
        elif msg.response_type == 1:  # Int
            return msg.int_value
        elif msg.response_type == 2:  # Float
            return msg.float_value
        elif msg.response_type == 3:  # Bool
            return bool(msg.bool_value)
        elif msg.response_type == 4:  # String
            try:
                # Length-based copy: the payload may contain NUL bytes.
                return self.ffi.buffer(msg.string_value, msg.string_value_len)[:]
            except Exception as e:
                print(f"Error decoding string value: {e}")
        elif msg.response_type == 5:  # Array
            array = []
            for i in range(msg.array_value_len):
                element = self.ffi.cast("struct CommandResponse*", msg.array_value + i)
                array.append(self._handle_response(element))
            return array
        elif msg.response_type == 6:  # Map
            # NOTE(review): the struct carries no dedicated map length; the
            # native side appears to reuse array_value_len — confirm.
            map_dict = {}
            for i in range(msg.array_value_len):
                key = self.ffi.cast("struct CommandResponse*", msg.map_key + i)
                value = self.ffi.cast("struct CommandResponse*", msg.map_value + i)
                map_dict[self._handle_response(key)] = self._handle_response(value)
            return map_dict
        elif msg.response_type == 7:  # Sets
            result_set = set()
            sets_array = self.ffi.cast(
                f"struct CommandResponse[{msg.sets_value_len}]", msg.sets_value
            )
            for i in range(msg.sets_value_len):
                result_set.add(self._handle_response(sets_array[i]))
            return result_set
        else:
            print(f"Unhandled response type = {msg.response_type}")
            return None

    def _to_c_strings(self, args):
        """Convert Python arguments to C-compatible pointers and lengths.

        Accepts str (UTF-8 encoded), bytes (passed through), and numbers
        (stringified). Returns (pointer array, length array, buffers); the
        caller MUST keep `buffers` alive for the duration of the native call,
        since the pointers reference those byte objects directly.
        """
        c_strings = []
        string_lengths = []
        buffers = []  # Keeps the byte objects alive while pointers are in use.

        for arg in args:
            if isinstance(arg, str):
                arg_bytes = arg.encode("utf-8")
            elif isinstance(arg, bytes):
                # bytes are a valid TEncodable; the original rejected them.
                arg_bytes = arg
            elif isinstance(arg, (int, float)):
                arg_bytes = str(arg).encode("utf-8")
            else:
                raise ValueError(f"Unsupported argument type: {type(arg)}")

            # ffi.from_buffer is zero-copy; hence the buffers list above.
            buffers.append(arg_bytes)
            c_strings.append(self.ffi.cast("size_t", self.ffi.from_buffer(arg_bytes)))
            string_lengths.append(len(arg_bytes))

        return (
            self.ffi.new("size_t[]", c_strings),
            self.ffi.new("unsigned long[]", string_lengths),
            buffers,
        )

    def _execute_command(
        self,
        request_type: RequestType.ValueType,
        args: List[TEncodable],
        route: Optional[Route] = None,
    ) -> TResult:
        """Send one command to the core and return its decoded response.

        Args:
            request_type: the command id (see RequestType).
            args: command arguments; str/bytes/numbers are supported.
            route: currently unused by this FFI path.

        Raises:
            ValueError: if the client pointer is invalid.
        """
        client_adapter_ptr = self.core_client
        if client_adapter_ptr == self.ffi.NULL:
            raise ValueError("Invalid client pointer.")

        # `buffers` must outlive the native call — see _to_c_strings.
        c_args, c_lengths, buffers = self._to_c_strings(args)
        response_ptr = self.lib.command(
            client_adapter_ptr,  # Client pointer
            1,                   # Channel (unused by the sync path)
            request_type,        # Request type (e.g. GET or SET)
            len(args),           # Number of arguments
            c_args,              # Array of argument pointers
            c_lengths,           # Array of argument lengths
        )
        try:
            return self._handle_response(response_ptr)
        finally:
            # _handle_response copies everything out of native memory, so the
            # struct can be released here; the original never freed it and
            # leaked one CommandResponse per command.
            if response_ptr != self.ffi.NULL:
                self.lib.free_command_response(response_ptr)
diff --git a/python/python/glide/lib.h b/python/python/glide/lib.h
new file mode 100644
index 0000000000..9ce1646a65
--- /dev/null
+++ b/python/python/glide/lib.h
@@ -0,0 +1,570 @@
+/* Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */
+
/* Standard headers required by the declarations below. The original
 * `#include` lines had lost their targets (presumably mangled during
 * extraction); this is the set cbindgen emits and what the header needs:
 * bool (stdbool.h), uint8_t/uintptr_t (stdint.h), size_t (stdlib.h). */
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
+
/* Command identifiers understood by the glide core `command` entry point.
 * Values are grouped by hundreds per command family and must stay in sync
 * with the Rust-side enum this header is generated from — do not renumber. */
typedef enum RequestType {
  /**
   * Invalid request type
   */
  InvalidRequest = 0,
  /**
   * An unknown command, where all arguments are defined by the user.
   */
  CustomCommand = 1,
  /* 1xx: bitmap commands */
  BitCount = 101,
  BitField = 102,
  BitFieldReadOnly = 103,
  BitOp = 104,
  BitPos = 105,
  GetBit = 106,
  SetBit = 107,
  /* 2xx: cluster management commands */
  Asking = 201,
  ClusterAddSlots = 202,
  ClusterAddSlotsRange = 203,
  ClusterBumpEpoch = 204,
  ClusterCountFailureReports = 205,
  ClusterCountKeysInSlot = 206,
  ClusterDelSlots = 207,
  ClusterDelSlotsRange = 208,
  ClusterFailover = 209,
  ClusterFlushSlots = 210,
  ClusterForget = 211,
  ClusterGetKeysInSlot = 212,
  ClusterInfo = 213,
  ClusterKeySlot = 214,
  ClusterLinks = 215,
  ClusterMeet = 216,
  ClusterMyId = 217,
  ClusterMyShardId = 218,
  ClusterNodes = 219,
  ClusterReplicas = 220,
  ClusterReplicate = 221,
  ClusterReset = 222,
  ClusterSaveConfig = 223,
  ClusterSetConfigEpoch = 224,
  ClusterSetslot = 225,
  ClusterShards = 226,
  ClusterSlaves = 227,
  ClusterSlots = 228,
  ReadOnly = 229,
  ReadWrite = 230,
  /* 3xx: connection management commands */
  Auth = 301,
  ClientCaching = 302,
  ClientGetName = 303,
  ClientGetRedir = 304,
  ClientId = 305,
  ClientInfo = 306,
  ClientKillSimple = 307,
  ClientKill = 308,
  ClientList = 309,
  ClientNoEvict = 310,
  ClientNoTouch = 311,
  ClientPause = 312,
  ClientReply = 313,
  ClientSetInfo = 314,
  ClientSetName = 315,
  ClientTracking = 316,
  ClientTrackingInfo = 317,
  ClientUnblock = 318,
  ClientUnpause = 319,
  Echo = 320,
  Hello = 321,
  Ping = 322,
  Quit = 323,
  Reset = 324,
  Select = 325,
  /* 4xx: generic key commands */
  Copy = 401,
  Del = 402,
  Dump = 403,
  Exists = 404,
  Expire = 405,
  ExpireAt = 406,
  ExpireTime = 407,
  Keys = 408,
  Migrate = 409,
  Move = 410,
  ObjectEncoding = 411,
  ObjectFreq = 412,
  ObjectIdleTime = 413,
  ObjectRefCount = 414,
  Persist = 415,
  PExpire = 416,
  PExpireAt = 417,
  PExpireTime = 418,
  PTTL = 419,
  RandomKey = 420,
  Rename = 421,
  RenameNX = 422,
  Restore = 423,
  Scan = 424,
  Sort = 425,
  SortReadOnly = 426,
  Touch = 427,
  TTL = 428,
  Type = 429,
  Unlink = 430,
  Wait = 431,
  WaitAof = 432,
  /* 5xx: geospatial commands */
  GeoAdd = 501,
  GeoDist = 502,
  GeoHash = 503,
  GeoPos = 504,
  GeoRadius = 505,
  GeoRadiusReadOnly = 506,
  GeoRadiusByMember = 507,
  GeoRadiusByMemberReadOnly = 508,
  GeoSearch = 509,
  GeoSearchStore = 510,
  /* 6xx: hash commands */
  HDel = 601,
  HExists = 602,
  HGet = 603,
  HGetAll = 604,
  HIncrBy = 605,
  HIncrByFloat = 606,
  HKeys = 607,
  HLen = 608,
  HMGet = 609,
  HMSet = 610,
  HRandField = 611,
  HScan = 612,
  HSet = 613,
  HSetNX = 614,
  HStrlen = 615,
  HVals = 616,
  /* 7xx: HyperLogLog commands */
  PfAdd = 701,
  PfCount = 702,
  PfMerge = 703,
  /* 8xx: list commands */
  BLMove = 801,
  BLMPop = 802,
  BLPop = 803,
  BRPop = 804,
  BRPopLPush = 805,
  LIndex = 806,
  LInsert = 807,
  LLen = 808,
  LMove = 809,
  LMPop = 810,
  LPop = 811,
  LPos = 812,
  LPush = 813,
  LPushX = 814,
  LRange = 815,
  LRem = 816,
  LSet = 817,
  LTrim = 818,
  RPop = 819,
  RPopLPush = 820,
  RPush = 821,
  RPushX = 822,
  /* 9xx: pub/sub commands */
  PSubscribe = 901,
  Publish = 902,
  PubSubChannels = 903,
  PubSubNumPat = 904,
  PubSubNumSub = 905,
  PubSubShardChannels = 906,
  PubSubShardNumSub = 907,
  PUnsubscribe = 908,
  SPublish = 909,
  SSubscribe = 910,
  Subscribe = 911,
  SUnsubscribe = 912,
  Unsubscribe = 913,
  /* 10xx: scripting and functions */
  Eval = 1001,
  EvalReadOnly = 1002,
  EvalSha = 1003,
  EvalShaReadOnly = 1004,
  FCall = 1005,
  FCallReadOnly = 1006,
  FunctionDelete = 1007,
  FunctionDump = 1008,
  FunctionFlush = 1009,
  FunctionKill = 1010,
  FunctionList = 1011,
  FunctionLoad = 1012,
  FunctionRestore = 1013,
  FunctionStats = 1014,
  ScriptDebug = 1015,
  ScriptExists = 1016,
  ScriptFlush = 1017,
  ScriptKill = 1018,
  ScriptLoad = 1019,
  ScriptShow = 1020,
  /* 11xx: server management commands */
  AclCat = 1101,
  AclDelUser = 1102,
  AclDryRun = 1103,
  AclGenPass = 1104,
  AclGetUser = 1105,
  AclList = 1106,
  AclLoad = 1107,
  AclLog = 1108,
  AclSave = 1109,
  /* NOTE(review): "AclSetSser" looks like a typo for AclSetUser, but the
   * identifier must mirror the Rust enum this header is generated from —
   * fix upstream, not here. */
  AclSetSser = 1110,
  AclUsers = 1111,
  AclWhoami = 1112,
  BgRewriteAof = 1113,
  BgSave = 1114,
  Command_ = 1115,
  CommandCount = 1116,
  CommandDocs = 1117,
  CommandGetKeys = 1118,
  CommandGetKeysAndFlags = 1119,
  CommandInfo = 1120,
  CommandList = 1121,
  ConfigGet = 1122,
  ConfigResetStat = 1123,
  ConfigRewrite = 1124,
  ConfigSet = 1125,
  DBSize = 1126,
  FailOver = 1127,
  FlushAll = 1128,
  FlushDB = 1129,
  Info = 1130,
  LastSave = 1131,
  LatencyDoctor = 1132,
  LatencyGraph = 1133,
  LatencyHistogram = 1134,
  LatencyHistory = 1135,
  LatencyLatest = 1136,
  LatencyReset = 1137,
  Lolwut = 1138,
  MemoryDoctor = 1139,
  MemoryMallocStats = 1140,
  MemoryPurge = 1141,
  MemoryStats = 1142,
  MemoryUsage = 1143,
  ModuleList = 1144,
  ModuleLoad = 1145,
  ModuleLoadEx = 1146,
  ModuleUnload = 1147,
  Monitor = 1148,
  PSync = 1149,
  ReplConf = 1150,
  ReplicaOf = 1151,
  RestoreAsking = 1152,
  Role = 1153,
  Save = 1154,
  ShutDown = 1155,
  SlaveOf = 1156,
  SlowLogGet = 1157,
  SlowLogLen = 1158,
  SlowLogReset = 1159,
  SwapDb = 1160,
  Sync = 1161,
  Time = 1162,
  /* 12xx: set commands */
  SAdd = 1201,
  SCard = 1202,
  SDiff = 1203,
  SDiffStore = 1204,
  SInter = 1205,
  SInterCard = 1206,
  SInterStore = 1207,
  SIsMember = 1208,
  SMembers = 1209,
  SMIsMember = 1210,
  SMove = 1211,
  SPop = 1212,
  SRandMember = 1213,
  SRem = 1214,
  SScan = 1215,
  SUnion = 1216,
  SUnionStore = 1217,
  /* 13xx: sorted set commands */
  BZMPop = 1301,
  BZPopMax = 1302,
  BZPopMin = 1303,
  ZAdd = 1304,
  ZCard = 1305,
  ZCount = 1306,
  ZDiff = 1307,
  ZDiffStore = 1308,
  ZIncrBy = 1309,
  ZInter = 1310,
  ZInterCard = 1311,
  ZInterStore = 1312,
  ZLexCount = 1313,
  ZMPop = 1314,
  ZMScore = 1315,
  ZPopMax = 1316,
  ZPopMin = 1317,
  ZRandMember = 1318,
  ZRange = 1319,
  ZRangeByLex = 1320,
  ZRangeByScore = 1321,
  ZRangeStore = 1322,
  ZRank = 1323,
  ZRem = 1324,
  ZRemRangeByLex = 1325,
  ZRemRangeByRank = 1326,
  ZRemRangeByScore = 1327,
  ZRevRange = 1328,
  ZRevRangeByLex = 1329,
  ZRevRangeByScore = 1330,
  ZRevRank = 1331,
  ZScan = 1332,
  ZScore = 1333,
  ZUnion = 1334,
  ZUnionStore = 1335,
  /* 14xx: stream commands */
  XAck = 1401,
  XAdd = 1402,
  XAutoClaim = 1403,
  XClaim = 1404,
  XDel = 1405,
  XGroupCreate = 1406,
  XGroupCreateConsumer = 1407,
  XGroupDelConsumer = 1408,
  XGroupDestroy = 1409,
  XGroupSetId = 1410,
  XInfoConsumers = 1411,
  XInfoGroups = 1412,
  XInfoStream = 1413,
  XLen = 1414,
  XPending = 1415,
  XRange = 1416,
  XRead = 1417,
  XReadGroup = 1418,
  XRevRange = 1419,
  XSetId = 1420,
  XTrim = 1421,
  /* 15xx: string commands */
  Append = 1501,
  Decr = 1502,
  DecrBy = 1503,
  Get = 1504,
  GetDel = 1505,
  GetEx = 1506,
  GetRange = 1507,
  GetSet = 1508,
  Incr = 1509,
  IncrBy = 1510,
  IncrByFloat = 1511,
  LCS = 1512,
  MGet = 1513,
  MSet = 1514,
  MSetNX = 1515,
  PSetEx = 1516,
  Set = 1517,
  SetEx = 1518,
  SetNX = 1519,
  SetRange = 1520,
  Strlen = 1521,
  Substr = 1522,
  /* 16xx: transaction commands */
  Discard = 1601,
  Exec = 1602,
  Multi = 1603,
  UnWatch = 1604,
  Watch = 1605,
  /* 20xx: JSON module commands */
  JsonArrAppend = 2001,
  JsonArrIndex = 2002,
  JsonArrInsert = 2003,
  JsonArrLen = 2004,
  JsonArrPop = 2005,
  JsonArrTrim = 2006,
  JsonClear = 2007,
  JsonDebug = 2008,
  JsonDel = 2009,
  JsonForget = 2010,
  JsonGet = 2011,
  JsonMGet = 2012,
  JsonNumIncrBy = 2013,
  JsonNumMultBy = 2014,
  JsonObjKeys = 2015,
  JsonObjLen = 2016,
  JsonResp = 2017,
  JsonSet = 2018,
  JsonStrAppend = 2019,
  JsonStrLen = 2020,
  JsonToggle = 2021,
  JsonType = 2022,
  /* 21xx: search (FT) module commands */
  FtList = 2101,
  FtAggregate = 2102,
  FtAliasAdd = 2103,
  FtAliasDel = 2104,
  FtAliasList = 2105,
  FtAliasUpdate = 2106,
  FtCreate = 2107,
  FtDropIndex = 2108,
  FtExplain = 2109,
  FtExplainCli = 2110,
  FtInfo = 2111,
  FtProfile = 2112,
  FtSearch = 2113,
} RequestType;
+
/* Discriminant telling which field of `CommandResponse` holds the payload. */
typedef enum ResponseType {
  Null = 0,
  Int = 1,
  Float = 2,
  Bool = 3,
  String = 4,
  Array = 5,
  Map = 6,
  Sets = 7,
} ResponseType;
+
/**
 * The connection response.
 *
 * It contains either a connection or an error. It is represented as a struct instead of a union for ease of use in the wrapper language.
 *
 * The struct is freed by the external caller by using `free_connection_response` to avoid memory leaks.
 */
typedef struct ConnectionResponse {
  /* Opaque pointer to the native client; NULL when creation failed. */
  const void *conn_ptr;
  /* Human-readable error message; NULL on success. */
  const char *connection_error_message;
} ConnectionResponse;
+
/**
 * The struct represents the response of the command.
 *
 * It will have one of the value populated depending on the return type of the command.
 *
 * The struct is freed by the external caller by using `free_command_response` to avoid memory leaks.
 * TODO: Add a type enum to validate what type of response is being sent in the CommandResponse.
 */
typedef struct CommandResponse {
  enum ResponseType response_type;
  long int_value;
  double float_value;
  bool bool_value;
  /**
   * Below two values are related to each other.
   * `string_value` represents the string.
   * `string_value_len` represents the length of the string.
   */
  char *string_value;
  long string_value_len;
  /**
   * Below two values are related to each other.
   * `array_value` represents the array of CommandResponse.
   * `array_value_len` represents the length of the array.
   */
  struct CommandResponse *array_value;
  long array_value_len;
  /**
   * Below two values represent the Map structure inside CommandResponse.
   * The map is transformed into an array of (map_key: CommandResponse, map_value: CommandResponse) and passed to Go.
   * These are represented as pointers as the map can be null (optionally present).
   *
   * NOTE(review): there is no dedicated map-length field; the Python binding
   * reuses `array_value_len` as the pair count — confirm against the Rust side.
   */
  struct CommandResponse *map_key;
  struct CommandResponse *map_value;
  /**
   * Below two values are related to each other.
   * `sets_value` represents the set of CommandResponse.
   * `sets_value_len` represents the length of the set.
   */
  struct CommandResponse *sets_value;
  long sets_value_len;
} CommandResponse;
+
+/**
+ * Creates a new `ClientAdapter` with a new `GlideClient` configured using a Protobuf `ConnectionRequest`.
+ *
+ * The returned `ConnectionResponse` will only be freed by calling [`free_connection_response`].
+ *
+ * `connection_request_bytes` is an array of bytes that will be parsed into a Protobuf `ConnectionRequest` object.
+ * `connection_request_len` is the number of bytes in `connection_request_bytes`.
+ * `success_callback` is the callback that will be called when a command succeeds.
+ * `failure_callback` is the callback that will be called when a command fails.
+ *
+ * # Safety
+ *
+ * * `connection_request_bytes` must point to `connection_request_len` consecutive properly initialized bytes. It must be a well-formed Protobuf `ConnectionRequest` object. The array must be allocated by the caller and subsequently freed by the caller after this function returns.
+ * * `connection_request_len` must not be greater than the length of the connection request bytes array. It must also not be greater than the max value of a signed pointer-sized integer.
+ * * The `conn_ptr` pointer in the returned `ConnectionResponse` must live while the client is open/active and must be explicitly freed by calling [`close_client`].
+ * * The `connection_error_message` pointer in the returned `ConnectionResponse` must live until the returned `ConnectionResponse` pointer is passed to [`free_connection_response`].
+ * * Both the `success_callback` and `failure_callback` function pointers need to live while the client is open/active. The caller is responsible for freeing both callbacks.
+ */
+const struct ConnectionResponse *create_client(void);
+
/**
 * Closes the given `GlideClient`, freeing it from the heap.
 *
 * `client_adapter_ptr` is a pointer to a valid `GlideClient` returned in the `ConnectionResponse` from [`create_client`].
 *
 * # Panics
 *
 * This function panics when called with a null `client_adapter_ptr`.
 *
 * # Safety
 *
 * * `close_client` can only be called once per client. Calling it twice is undefined behavior, since the address will be freed twice.
 * * `close_client` must be called after `free_connection_response` has been called to avoid creating a dangling pointer in the `ConnectionResponse`.
 * * `client_adapter_ptr` must be obtained from the `ConnectionResponse` returned from [`create_client`].
 * * `client_adapter_ptr` must be valid until `close_client` is called.
 */
void close_client(const void *client_adapter_ptr);

/**
 * Deallocates a `ConnectionResponse`.
 *
 * This function also frees the contained error. If the contained error is a null pointer, the function returns and only the `ConnectionResponse` is freed.
 *
 * # Panics
 *
 * This function panics when called with a null `ConnectionResponse` pointer.
 *
 * # Safety
 *
 * * `free_connection_response` can only be called once per `ConnectionResponse`. Calling it twice is undefined behavior, since the address will be freed twice.
 * * `connection_response_ptr` must be obtained from the `ConnectionResponse` returned from [`create_client`].
 * * `connection_response_ptr` must be valid until `free_connection_response` is called.
 * * The contained `connection_error_message` must be obtained from the `ConnectionResponse` returned from [`create_client`].
 * * The contained `connection_error_message` must be valid until `free_connection_response` is called and it must outlive the `ConnectionResponse` that contains it.
 */
void free_connection_response(struct ConnectionResponse *connection_response_ptr);

/**
 * Provides the string mapping for the ResponseType enum.
 */
char *get_response_type_string(enum ResponseType response_type);

/**
 * Deallocates a string generated via get_response_type_string.
 *
 * # Safety
 * free_response_type_string can be called only once per response_string.
 */
void free_response_type_string(char *response_string);

/**
 * Deallocates a `CommandResponse`.
 *
 * This function also frees the contained string_value and array_value. If the string_value and array_value are null pointers, the function returns and only the `CommandResponse` is freed.
 *
 * # Safety
 *
 * * `free_command_response` can only be called once per `CommandResponse`. Calling it twice is undefined behavior, since the address will be freed twice.
 * * `command_response_ptr` must be obtained from the `CommandResponse` returned in [`SuccessCallback`] from [`command`].
 * * `command_response_ptr` must be valid until `free_command_response` is called.
 */
void free_command_response(struct CommandResponse *command_response_ptr);

/**
 * Frees the error_message received on a command failure.
 * TODO: Add a test case to check for memory leak.
 *
 * # Panics
 *
 * This function panics when called with a null `c_char` pointer.
 *
 * # Safety
 *
 * `free_error_message` can only be called once per `error_message`. Calling it twice is undefined
 * behavior, since the address will be freed twice.
 */
void free_error_message(char *error_message);

/**
 * Executes a command.
 *
 * # Safety
 *
 * * TODO: finish safety section.
 * * NOTE(review): based on the cffi caller in this change set, `client_adapter_ptr`
 *   must come from [`create_client`], `args` must hold `arg_count` pointers whose
 *   byte lengths are given in `args_len`, and the returned pointer should be
 *   released with [`free_command_response`] — confirm against the Rust implementation.
 */
struct CommandResponse *command(const void *client_adapter_ptr,
                                uintptr_t channel,
                                enum RequestType command_type,
                                unsigned long arg_count,
                                const uintptr_t *args,
                                const unsigned long *args_len);
diff --git a/python/python/glide/sync_commands/core.py b/python/python/glide/sync_commands/core.py
new file mode 100644
index 0000000000..856dcda2ea
--- /dev/null
+++ b/python/python/glide/sync_commands/core.py
@@ -0,0 +1,7000 @@
+# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+from dataclasses import dataclass
+from datetime import datetime, timedelta
+from enum import Enum
+from typing import (
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Protocol,
+ Set,
+ Tuple,
+ Type,
+ Union,
+ cast,
+ get_args,
+)
+
+from glide.async_commands.bitmap import (
+ BitFieldGet,
+ BitFieldSubCommands,
+ BitmapIndexType,
+ BitwiseOperation,
+ OffsetOptions,
+ _create_bitfield_args,
+ _create_bitfield_read_only_args,
+)
+from glide.async_commands.command_args import Limit, ListDirection, ObjectType, OrderBy
+from glide.async_commands.sorted_set import (
+ AggregationType,
+ GeoSearchByBox,
+ GeoSearchByRadius,
+ GeoSearchCount,
+ GeospatialData,
+ GeoUnit,
+ InfBound,
+ LexBoundary,
+ RangeByIndex,
+ RangeByLex,
+ RangeByScore,
+ ScoreBoundary,
+ ScoreFilter,
+ _create_geosearch_args,
+ _create_zinter_zunion_cmd_args,
+ _create_zrange_args,
+)
+from glide.async_commands.stream import (
+ StreamAddOptions,
+ StreamClaimOptions,
+ StreamGroupOptions,
+ StreamPendingOptions,
+ StreamRangeBound,
+ StreamReadGroupOptions,
+ StreamReadOptions,
+ StreamTrimOptions,
+ _create_xpending_range_args,
+)
+from glide.constants import (
+ TOK,
+ TEncodable,
+ TResult,
+ TXInfoStreamFullResponse,
+ TXInfoStreamResponse,
+)
+from glide.protobuf.command_request_pb2 import RequestType
+from glide.routes import Route
+
+from ..glide import ClusterScanCursor
+
+
class ConditionalChange(Enum):
    """
    A condition to the `SET`, `ZADD` and `GEOADD` commands.

    - ONLY_IF_EXISTS - Only update keys / elements that already exist. Equivalent to `XX` in the Valkey API.
    - ONLY_IF_DOES_NOT_EXIST - Only set keys / add elements that do not already exist. Equivalent to `NX` in the Valkey API.
    """

    ONLY_IF_EXISTS = "XX"
    ONLY_IF_DOES_NOT_EXIST = "NX"
+
+
class ExpiryType(Enum):
    """SET option: The type of the expiry.

    Each member's value is a ``(code, expected_value_type)`` tuple; the second element
    is used (via ``typing.get_args``) by `ExpirySet` to validate the expiry value.

    - SEC - Set the specified expire time, in seconds. Equivalent to `EX` in the Valkey API.
    - MILLSEC - Set the specified expire time, in milliseconds. Equivalent to `PX` in the Valkey API.
    - UNIX_SEC - Set the specified Unix time at which the key will expire, in seconds. Equivalent to `EXAT` in the Valkey API.
    - UNIX_MILLSEC - Set the specified Unix time at which the key will expire, in milliseconds. Equivalent to `PXAT` in the
        Valkey API.
    - KEEP_TTL - Retain the time to live associated with the key. Equivalent to `KEEPTTL` in the Valkey API.
    """

    SEC = 0, Union[int, timedelta]  # Equivalent to `EX` in the Valkey API
    MILLSEC = 1, Union[int, timedelta]  # Equivalent to `PX` in the Valkey API
    UNIX_SEC = 2, Union[int, datetime]  # Equivalent to `EXAT` in the Valkey API
    UNIX_MILLSEC = 3, Union[int, datetime]  # Equivalent to `PXAT` in the Valkey API
    KEEP_TTL = 4, Type[None]  # Equivalent to `KEEPTTL` in the Valkey API
+
+
class ExpiryTypeGetEx(Enum):
    """GetEx option: The type of the expiry.

    Each member's value is a ``(code, expected_value_type)`` tuple; the second element
    is used (via ``typing.get_args``) by `ExpiryGetEx` to validate the expiry value.

    - SEC - Set the specified expire time, in seconds. Equivalent to `EX` in the Valkey API.
    - MILLSEC - Set the specified expire time, in milliseconds. Equivalent to `PX` in the Valkey API.
    - UNIX_SEC - Set the specified Unix time at which the key will expire, in seconds. Equivalent to `EXAT` in the Valkey API.
    - UNIX_MILLSEC - Set the specified Unix time at which the key will expire, in milliseconds. Equivalent to `PXAT` in the
        Valkey API.
    - PERSIST - Remove the time to live associated with the key. Equivalent to `PERSIST` in the Valkey API.
    """

    SEC = 0, Union[int, timedelta]  # Equivalent to `EX` in the Valkey API
    MILLSEC = 1, Union[int, timedelta]  # Equivalent to `PX` in the Valkey API
    UNIX_SEC = 2, Union[int, datetime]  # Equivalent to `EXAT` in the Valkey API
    UNIX_MILLSEC = 3, Union[int, datetime]  # Equivalent to `PXAT` in the Valkey API
    PERSIST = 4, Type[None]  # Equivalent to `PERSIST` in the Valkey API
+
+
class InfoSection(Enum):
    """
    INFO option: a specific section of information:

    - SERVER: General information about the server
    - CLIENTS: Client connections section
    - MEMORY: Memory consumption related information
    - PERSISTENCE: RDB and AOF related information
    - STATS: General statistics
    - REPLICATION: Master/replica replication information
    - CPU: CPU consumption statistics
    - COMMAND_STATS: Valkey command statistics
    - LATENCY_STATS: Valkey command latency percentile distribution statistics
    - SENTINEL: Valkey Sentinel section (only applicable to Sentinel instances)
    - CLUSTER: Valkey Cluster section
    - MODULES: Modules section
    - KEYSPACE: Database related statistics
    - ERROR_STATS: Valkey error statistics
    - ALL: Return all sections (excluding module generated ones)
    - DEFAULT: Return only the default set of sections
    - EVERYTHING: Includes all and modules

    When no parameter is provided, the default option is assumed.
    """

    SERVER = "server"
    CLIENTS = "clients"
    MEMORY = "memory"
    PERSISTENCE = "persistence"
    STATS = "stats"
    REPLICATION = "replication"
    CPU = "cpu"
    COMMAND_STATS = "commandstats"
    LATENCY_STATS = "latencystats"
    SENTINEL = "sentinel"
    CLUSTER = "cluster"
    MODULES = "modules"
    KEYSPACE = "keyspace"
    ERROR_STATS = "errorstats"
    ALL = "all"
    DEFAULT = "default"
    EVERYTHING = "everything"
+
+
class ExpireOptions(Enum):
    """
    EXPIRE option: options for setting key expiry.

    - HasNoExpiry: Set expiry only when the key has no expiry (Equivalent to "NX" in Valkey).
    - HasExistingExpiry: Set expiry only when the key has an existing expiry (Equivalent to "XX" in Valkey).
    - NewExpiryGreaterThanCurrent: Set expiry only when the new expiry is greater than the current one (Equivalent
        to "GT" in Valkey).
    - NewExpiryLessThanCurrent: Set expiry only when the new expiry is less than the current one (Equivalent to "LT" in Valkey).
    """

    HasNoExpiry = "NX"
    HasExistingExpiry = "XX"
    NewExpiryGreaterThanCurrent = "GT"
    NewExpiryLessThanCurrent = "LT"
+
+
class UpdateOptions(Enum):
    """
    Options for updating elements of a sorted set key.

    - LESS_THAN: Only update existing elements if the new score is less than the current score.
    - GREATER_THAN: Only update existing elements if the new score is greater than the current score.
    """

    LESS_THAN = "LT"
    GREATER_THAN = "GT"
+
+
class ExpirySet:
    """SET option: Represents the expiry type and value to be executed with "SET" command."""

    def __init__(
        self,
        expiry_type: ExpiryType,
        value: Optional[Union[int, datetime, timedelta]],
    ) -> None:
        """
        Args:
            - expiry_type (ExpiryType): The expiry type.
            - value (Optional[Union[int, datetime, timedelta]]): The value of the expiration type. The type of expiration
                determines the type of expiration value:
                - SEC: Union[int, timedelta]
                - MILLSEC: Union[int, timedelta]
                - UNIX_SEC: Union[int, datetime]
                - UNIX_MILLSEC: Union[int, datetime]
                - KEEP_TTL: Type[None]

        Raises:
            ValueError: If `value` does not match the type expected by `expiry_type`.
        """
        self.set_expiry_type_and_value(expiry_type, value)

    def set_expiry_type_and_value(
        self, expiry_type: ExpiryType, value: Optional[Union[int, datetime, timedelta]]
    ):
        """
        Validates `value` against the type advertised by `expiry_type` and normalizes
        it into the server-facing command token (`self.cmd_arg`) and string value
        (`self.value`).
        """
        if not isinstance(value, get_args(expiry_type.value[1])):
            raise ValueError(
                f"The value of {expiry_type} should be of type {expiry_type.value[1]}"
            )
        self.expiry_type = expiry_type
        if self.expiry_type == ExpiryType.SEC:
            self.cmd_arg = "EX"
            if isinstance(value, timedelta):
                value = int(value.total_seconds())
        elif self.expiry_type == ExpiryType.MILLSEC:
            self.cmd_arg = "PX"
            if isinstance(value, timedelta):
                value = int(value.total_seconds() * 1000)
        elif self.expiry_type == ExpiryType.UNIX_SEC:
            self.cmd_arg = "EXAT"
            if isinstance(value, datetime):
                value = int(value.timestamp())
        elif self.expiry_type == ExpiryType.UNIX_MILLSEC:
            self.cmd_arg = "PXAT"
            if isinstance(value, datetime):
                value = int(value.timestamp() * 1000)
        elif self.expiry_type == ExpiryType.KEEP_TTL:
            self.cmd_arg = "KEEPTTL"
        # Use an explicit None check: a plain truthiness test (`if value`) would
        # silently discard a legitimate expiry of 0 (e.g. the Unix epoch for EXAT),
        # producing a malformed command such as ["EXAT"] with no value.
        self.value = str(value) if value is not None else None

    def get_cmd_args(self) -> List[str]:
        """Returns the expiry as command arguments, e.g. ["EX", "5"] or ["KEEPTTL"]."""
        return [self.cmd_arg] if self.value is None else [self.cmd_arg, self.value]
+
+
class ExpiryGetEx:
    """GetEx option: Represents the expiry type and value to be executed with "GetEx" command."""

    def __init__(
        self,
        expiry_type: ExpiryTypeGetEx,
        value: Optional[Union[int, datetime, timedelta]],
    ) -> None:
        """
        Args:
            - expiry_type (ExpiryTypeGetEx): The expiry type.
            - value (Optional[Union[int, datetime, timedelta]]): The value of the expiration type. The type of expiration
                determines the type of expiration value:
                - SEC: Union[int, timedelta]
                - MILLSEC: Union[int, timedelta]
                - UNIX_SEC: Union[int, datetime]
                - UNIX_MILLSEC: Union[int, datetime]
                - PERSIST: Type[None]

        Raises:
            ValueError: If `value` does not match the type expected by `expiry_type`.
        """
        self.set_expiry_type_and_value(expiry_type, value)

    def set_expiry_type_and_value(
        self,
        expiry_type: ExpiryTypeGetEx,
        value: Optional[Union[int, datetime, timedelta]],
    ):
        """
        Validates `value` against the type advertised by `expiry_type` and normalizes
        it into the server-facing command token (`self.cmd_arg`) and string value
        (`self.value`).
        """
        if not isinstance(value, get_args(expiry_type.value[1])):
            raise ValueError(
                f"The value of {expiry_type} should be of type {expiry_type.value[1]}"
            )
        self.expiry_type = expiry_type
        if self.expiry_type == ExpiryTypeGetEx.SEC:
            self.cmd_arg = "EX"
            if isinstance(value, timedelta):
                value = int(value.total_seconds())
        elif self.expiry_type == ExpiryTypeGetEx.MILLSEC:
            self.cmd_arg = "PX"
            if isinstance(value, timedelta):
                value = int(value.total_seconds() * 1000)
        elif self.expiry_type == ExpiryTypeGetEx.UNIX_SEC:
            self.cmd_arg = "EXAT"
            if isinstance(value, datetime):
                value = int(value.timestamp())
        elif self.expiry_type == ExpiryTypeGetEx.UNIX_MILLSEC:
            self.cmd_arg = "PXAT"
            if isinstance(value, datetime):
                value = int(value.timestamp() * 1000)
        elif self.expiry_type == ExpiryTypeGetEx.PERSIST:
            self.cmd_arg = "PERSIST"
        # Use an explicit None check: a plain truthiness test (`if value`) would
        # silently discard a legitimate expiry of 0 (e.g. the Unix epoch for EXAT),
        # producing a malformed command such as ["EXAT"] with no value.
        self.value = str(value) if value is not None else None

    def get_cmd_args(self) -> List[str]:
        """Returns the expiry as command arguments, e.g. ["EX", "5"] or ["PERSIST"]."""
        return [self.cmd_arg] if self.value is None else [self.cmd_arg, self.value]
+
+
class InsertPosition(Enum):
    # Where to insert an element relative to a pivot — presumably consumed by the
    # list-insert (`LINSERT`) command; TODO confirm against callers.
    BEFORE = "BEFORE"
    AFTER = "AFTER"
+
+
class FlushMode(Enum):
    """
    Defines flushing mode for:

    `FLUSHALL` command and `FUNCTION FLUSH` command.

    See https://valkey.io/commands/flushall/ and https://valkey.io/commands/function-flush/ for details

    SYNC was introduced in version 6.2.0.
    """

    ASYNC = "ASYNC"
    SYNC = "SYNC"
+
+
class FunctionRestorePolicy(Enum):
    """
    Options for the FUNCTION RESTORE command.

    - APPEND: Appends the restored libraries to the existing libraries and aborts on collision. This is the
        default policy.
    - FLUSH: Deletes all existing libraries before restoring the payload.
    - REPLACE: Appends the restored libraries to the existing libraries, replacing any existing ones in case
        of name collisions. Note that this policy doesn't prevent function name collisions, only libraries.
    """

    APPEND = "APPEND"
    FLUSH = "FLUSH"
    REPLACE = "REPLACE"
+
+
def _build_sort_args(
    key: TEncodable,
    by_pattern: Optional[TEncodable] = None,
    limit: Optional[Limit] = None,
    get_patterns: Optional[List[TEncodable]] = None,
    order: Optional[OrderBy] = None,
    alpha: Optional[bool] = None,
    store: Optional[TEncodable] = None,
) -> List[TEncodable]:
    """
    Assembles the argument list for the SORT family of commands.

    Optional clauses (BY, LIMIT, GET, ordering, ALPHA, STORE) are appended
    only when the corresponding parameter is provided.
    """
    sort_args: List[TEncodable] = [key]

    if by_pattern:
        sort_args += ["BY", by_pattern]

    if limit:
        sort_args += ["LIMIT", str(limit.offset), str(limit.count)]

    # Each GET pattern produces its own GET clause.
    for pattern in get_patterns or []:
        sort_args += ["GET", pattern]

    if order:
        sort_args.append(order.value)

    if alpha:
        sort_args.append("ALPHA")

    if store:
        sort_args += ["STORE", store]

    return sort_args
+
+
+class CoreCommands(Protocol):
    # Low-level transport hooks. These are Protocol stubs: concrete sync clients
    # must provide the implementations that actually talk to the server.

    # Sends a single command (identified by its protobuf RequestType) with the
    # given arguments, optionally routed to specific cluster nodes.
    def _execute_command(
        self,
        request_type: RequestType.ValueType,
        args: List[TEncodable],
        route: Optional[Route] = ...,
    ) -> TResult: ...

    # Executes a batch of commands as a transaction; returns one result per command.
    def _execute_transaction(
        self,
        commands: List[Tuple[RequestType.ValueType, List[TEncodable]]],
        route: Optional[Route] = None,
    ) -> List[TResult]: ...

    # Invokes a server-side Lua script by its SHA1 hash with optional keys/args.
    def _execute_script(
        self,
        hash: str,
        keys: Optional[List[TEncodable]] = None,
        args: Optional[List[TEncodable]] = None,
        route: Optional[Route] = None,
    ) -> TResult: ...

    # Advances a cluster-wide SCAN using an opaque cursor object.
    def _cluster_scan(
        self,
        cursor: ClusterScanCursor,
        match: Optional[TEncodable] = ...,
        count: Optional[int] = ...,
        type: Optional[ObjectType] = ...,
        allow_non_covered_slots: bool = ...,
    ) -> TResult: ...

    # Updates the client's stored password; see update_connection_password below.
    def _update_connection_password(
        self, password: Optional[str], immediate_auth: bool
    ) -> TResult: ...
+
+ def update_connection_password(
+ self, password: Optional[str], immediate_auth=False
+ ) -> TOK:
+ """
+ Update the current connection password with a new password.
+
+ **Note:** This method updates the client's internal password configuration and does
+ not perform password rotation on the server side.
+
+ This method is useful in scenarios where the server password has changed or when
+ utilizing short-lived passwords for enhanced security. It allows the client to
+ update its password to reconnect upon disconnection without the need to recreate
+ the client instance. This ensures that the internal reconnection mechanism can
+ handle reconnection seamlessly, preventing the loss of in-flight commands.
+
+ Args:
+ password (`Optional[str]`): The new password to use for the connection,
+ if `None` the password will be removed.
+ immediate_auth (`bool`):
+ - `True`: The client will authenticate immediately with the new password against all connections, Using `AUTH` command.
+ If password supplied is an empty string, auth will not be performed and warning will be returned.
+ The default is `False`.
+
+ Returns:
+ TOK: A simple OK response. If `immediate_auth=True` returns OK if the reauthenticate succeed.
+
+ Example:
+ >>> client.update_connection_password("new_password", immediate_auth=True)
+ 'OK'
+ """
+ return cast(
+ TOK, self._update_connection_password(password, immediate_auth)
+ )
+
+ def set(
+ self,
+ key: TEncodable,
+ value: TEncodable,
+ conditional_set: Optional[ConditionalChange] = None,
+ expiry: Optional[ExpirySet] = None,
+ return_old_value: bool = False,
+ ) -> Optional[bytes]:
+ """
+ Set the given key with the given value. Return value is dependent on the passed options.
+ See https://valkey.io/commands/set/ for more details.
+
+ Args:
+ key (TEncodable): the key to store.
+ value (TEncodable): the value to store with the given key.
+ conditional_set (Optional[ConditionalChange], optional): set the key only if the given condition is met.
+ Equivalent to [`XX` | `NX`] in the Valkey API. Defaults to None.
+ expiry (Optional[ExpirySet], optional): set expiriation to the given key.
+ Equivalent to [`EX` | `PX` | `EXAT` | `PXAT` | `KEEPTTL`] in the Valkey API. Defaults to None.
+ return_old_value (bool, optional): Return the old value stored at key, or None if key did not exist.
+ An error is returned and SET aborted if the value stored at key is not a string.
+ Equivalent to `GET` in the Valkey API. Defaults to False.
+
+ Returns:
+ Optional[bytes]:
+ If the value is successfully set, return OK.
+ If value isn't set because of only_if_exists or only_if_does_not_exist conditions, return None.
+ If return_old_value is set, return the old value as a bytes string.
+
+ Example:
+ >>> client.set(b"key", b"value")
+ 'OK'
+ >>> client.set("key", "new_value",conditional_set=ConditionalChange.ONLY_IF_EXISTS, expiry=Expiry(ExpiryType.SEC, 5))
+ 'OK' # Set "new_value" to "key" only if "key" already exists, and set the key expiration to 5 seconds.
+ >>> client.set("key", "value", conditional_set=ConditionalChange.ONLY_IF_DOES_NOT_EXIST,return_old_value=True)
+ b'new_value' # Returns the old value of "key".
+ >>> client.get("key")
+ b'new_value' # Value wasn't modified back to being "value" because of "NX" flag.
+ """
+ args = [key, value]
+ if conditional_set:
+ args.append(conditional_set.value)
+ if return_old_value:
+ args.append("GET")
+ if expiry is not None:
+ args.extend(expiry.get_cmd_args())
+ return cast(Optional[bytes], self._execute_command(RequestType.Set, args))
+
+ def get(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Get the value associated with the given key, or null if no such value exists.
+ See https://valkey.io/commands/get/ for details.
+
+ Args:
+ key (TEncodable): The key to retrieve from the database.
+
+ Returns:
+ Optional[bytes]: If the key exists, returns the value of the key as a byte string. Otherwise, return None.
+
+ Example:
+ >>> client.get("key")
+ b'value'
+ """
+ args: List[TEncodable] = [key]
+ return cast(Optional[bytes], self._execute_command(RequestType.Get, args))
+
+ def getdel(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Gets a value associated with the given string `key` and deletes the key.
+
+ See https://valkey.io/commands/getdel for more details.
+
+ Args:
+ key (TEncodable): The `key` to retrieve from the database.
+
+ Returns:
+ Optional[bytes]: If `key` exists, returns the `value` of `key`. Otherwise, returns `None`.
+
+ Examples:
+ >>> client.set("key", "value")
+ >>> client.getdel("key")
+ b'value'
+ >>> client.getdel("key")
+ None
+ """
+ return cast(
+ Optional[bytes], self._execute_command(RequestType.GetDel, [key])
+ )
+
+ def getrange(self, key: TEncodable, start: int, end: int) -> bytes:
+ """
+ Returns the substring of the value stored at `key`, determined by the offsets `start` and `end` (both are inclusive).
+ Negative offsets can be used in order to provide an offset starting from the end of the value.
+ So `-1` means the last character, `-2` the penultimate and so forth.
+
+ If `key` does not exist, an empty byte string is returned. If `start` or `end`
+ are out of range, returns the substring within the valid range of the value.
+
+ See https://valkey.io/commands/getrange/ for more details.
+
+ Args:
+ key (TEncodable): The key of the string.
+ start (int): The starting offset.
+ end (int): The ending offset.
+
+ Returns:
+ bytes: A substring extracted from the value stored at `key`.
+
+ Examples:
+ >>> client.set("mykey", "This is a string")
+ >>> client.getrange("mykey", 0, 3)
+ b"This"
+ >>> client.getrange("mykey", -3, -1)
+ b"ing" # extracted last 3 characters of a string
+ >>> client.getrange("mykey", 0, 100)
+ b"This is a string"
+ >>> client.getrange("non_existing", 5, 6)
+ b""
+ """
+ return cast(
+ bytes,
+ self._execute_command(
+ RequestType.GetRange, [key, str(start), str(end)]
+ ),
+ )
+
+ def append(self, key: TEncodable, value: TEncodable) -> int:
+ """
+ Appends a value to a key.
+ If `key` does not exist it is created and set as an empty string, so `APPEND` will be similar to `SET` in this special case.
+
+ See https://valkey.io/commands/append for more details.
+
+ Args:
+ key (TEncodable): The key to which the value will be appended.
+ value (TEncodable): The value to append.
+
+ Returns:
+ int: The length of the stored value after appending `value`.
+
+ Examples:
+ >>> client.append("key", "Hello")
+ 5 # Indicates that "Hello" has been appended to the value of "key", which was initially empty, resulting in a new value of "Hello" with a length of 5 - similar to the set operation.
+ >>> client.append("key", " world")
+ 11 # Indicates that " world" has been appended to the value of "key", resulting in a new value of "Hello world" with a length of 11.
+ >>> client.get("key")
+ b"Hello world" # Returns the value stored in "key", which is now "Hello world".
+ """
+ return cast(int, self._execute_command(RequestType.Append, [key, value]))
+
+ def strlen(self, key: TEncodable) -> int:
+ """
+ Get the length of the string value stored at `key`.
+ See https://valkey.io/commands/strlen/ for more details.
+
+ Args:
+ key (TEncodable): The key to return its length.
+
+ Returns:
+ int: The length of the string value stored at `key`.
+ If `key` does not exist, it is treated as an empty string and 0 is returned.
+
+ Examples:
+ >>> client.set("key", "GLIDE")
+ >>> client.strlen("key")
+ 5 # Indicates that the length of the string value stored at `key` is 5.
+ """
+ args: List[TEncodable] = [key]
+ return cast(int, self._execute_command(RequestType.Strlen, args))
+
+ def rename(self, key: TEncodable, new_key: TEncodable) -> TOK:
+ """
+ Renames `key` to `new_key`.
+ If `newkey` already exists it is overwritten.
+ See https://valkey.io/commands/rename/ for more details.
+
+ Note:
+ When in cluster mode, both `key` and `newkey` must map to the same hash slot.
+
+ Args:
+ key (TEncodable) : The key to rename.
+ new_key (TEncodable) : The new name of the key.
+
+ Returns:
+ OK: If the `key` was successfully renamed, return "OK". If `key` does not exist, an error is thrown.
+ """
+ return cast(
+ TOK, self._execute_command(RequestType.Rename, [key, new_key])
+ )
+
+ def renamenx(self, key: TEncodable, new_key: TEncodable) -> bool:
+ """
+ Renames `key` to `new_key` if `new_key` does not yet exist.
+
+ See https://valkey.io/commands/renamenx for more details.
+
+ Note:
+ When in cluster mode, both `key` and `new_key` must map to the same hash slot.
+
+ Args:
+ key (TEncodable): The key to rename.
+ new_key (TEncodable): The new key name.
+
+ Returns:
+ bool: True if `key` was renamed to `new_key`, or False if `new_key` already exists.
+
+ Examples:
+ >>> client.renamenx("old_key", "new_key")
+ True # "old_key" was renamed to "new_key"
+ """
+ return cast(
+ bool,
+ self._execute_command(RequestType.RenameNX, [key, new_key]),
+ )
+
+ def delete(self, keys: List[TEncodable]) -> int:
+ """
+ Delete one or more keys from the database. A key is ignored if it does not exist.
+ See https://valkey.io/commands/del/ for details.
+
+ Note:
+ In cluster mode, if keys in `keys` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+
+ Args:
+ keys (List[TEncodable]): A list of keys to be deleted from the database.
+
+ Returns:
+ int: The number of keys that were deleted.
+
+ Examples:
+ >>> client.set("key", "value")
+ >>> client.delete(["key"])
+ 1 # Indicates that the key was successfully deleted.
+ >>> client.delete(["key"])
+ 0 # No keys we're deleted since "key" doesn't exist.
+ """
+ return cast(int, self._execute_command(RequestType.Del, keys))
+
+ def incr(self, key: TEncodable) -> int:
+ """
+ Increments the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the
+ operation.
+ See https://valkey.io/commands/incr/ for more details.
+
+ Args:
+ key (TEncodable): The key to increment its value.
+
+ Returns:
+ int: The value of `key` after the increment.
+
+ Examples:
+ >>> client.set("key", "10")
+ >>> client.incr("key")
+ 11
+ """
+ return cast(int, self._execute_command(RequestType.Incr, [key]))
+
+ def incrby(self, key: TEncodable, amount: int) -> int:
+ """
+ Increments the number stored at `key` by `amount`. If the key does not exist, it is set to 0 before performing
+ the operation. See https://valkey.io/commands/incrby/ for more details.
+
+ Args:
+ key (TEncodable): The key to increment its value.
+ amount (int) : The amount to increment.
+
+ Returns:
+ int: The value of key after the increment.
+
+ Example:
+ >>> client.set("key", "10")
+ >>> client.incrby("key" , 5)
+ 15
+ """
+ return cast(
+ int, self._execute_command(RequestType.IncrBy, [key, str(amount)])
+ )
+
+ def incrbyfloat(self, key: TEncodable, amount: float) -> float:
+ """
+ Increment the string representing a floating point number stored at `key` by `amount`.
+ By using a negative increment value, the value stored at the `key` is decremented.
+ If the key does not exist, it is set to 0 before performing the operation.
+ See https://valkey.io/commands/incrbyfloat/ for more details.
+
+ Args:
+ key (TEncodable): The key to increment its value.
+ amount (float) : The amount to increment.
+
+ Returns:
+ float: The value of key after the increment.
+
+ Examples:
+ >>> client.set("key", "10")
+ >>> client.incrbyfloat("key" , 5.5)
+ 15.55
+ """
+ return cast(
+ float,
+ self._execute_command(RequestType.IncrByFloat, [key, str(amount)]),
+ )
+
+ def setrange(self, key: TEncodable, offset: int, value: TEncodable) -> int:
+ """
+ Overwrites part of the string stored at `key`, starting at the specified
+ `offset`, for the entire length of `value`.
+ If the `offset` is larger than the current length of the string at `key`,
+ the string is padded with zero bytes to make `offset` fit. Creates the `key`
+ if it doesn't exist.
+
+ See https://valkey.io/commands/setrange for more details.
+
+ Args:
+ key (TEncodable): The key of the string to update.
+ offset (int): The position in the string where `value` should be written.
+ value (TEncodable): The value written with `offset`.
+
+ Returns:
+ int: The length of the string stored at `key` after it was modified.
+
+ Examples:
+ >>> client.set("key", "Hello World")
+ >>> client.setrange("key", 6, "Glide")
+ 11 # The length of the string stored at `key` after it was modified.
+ """
+ return cast(
+ int,
+ self._execute_command(
+ RequestType.SetRange, [key, str(offset), value]
+ ),
+ )
+
+ def mset(self, key_value_map: Mapping[TEncodable, TEncodable]) -> TOK:
+ """
+ Set multiple keys to multiple values in a single atomic operation.
+ See https://valkey.io/commands/mset/ for more details.
+
+ Note:
+ In cluster mode, if keys in `key_value_map` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+
+ Args:
+ key_value_map (Mapping[TEncodable, TEncodable]): A map of key value pairs.
+
+ Returns:
+ OK: a simple OK response.
+
+ Example:
+ >>> client.mset({"key" : "value", "key2": "value2"})
+ 'OK'
+ """
+ parameters: List[TEncodable] = []
+ for pair in key_value_map.items():
+ parameters.extend(pair)
+ return cast(TOK, self._execute_command(RequestType.MSet, parameters))
+
+ def msetnx(self, key_value_map: Mapping[TEncodable, TEncodable]) -> bool:
+ """
+ Sets multiple keys to values if the key does not exist. The operation is atomic, and if one or
+ more keys already exist, the entire operation fails.
+
+ Note:
+ When in cluster mode, all keys in `key_value_map` must map to the same hash slot.
+
+ See https://valkey.io/commands/msetnx/ for more details.
+
+ Args:
+ key_value_map (Mapping[TEncodable, TEncodable]): A key-value map consisting of keys and their respective values to set.
+
+ Returns:
+ bool: True if all keys were set. False if no key was set.
+
+ Examples:
+ >>> client.msetnx({"key1": "value1", "key2": "value2"})
+ True
+ >>> client.msetnx({"key2": "value4", "key3": "value5"})
+ False
+ """
+ parameters: List[TEncodable] = []
+ for pair in key_value_map.items():
+ parameters.extend(pair)
+ return cast(
+ bool,
+ self._execute_command(RequestType.MSetNX, parameters),
+ )
+
+ def mget(self, keys: List[TEncodable]) -> List[Optional[bytes]]:
+ """
+ Retrieve the values of multiple keys.
+ See https://valkey.io/commands/mget/ for more details.
+
+ Note:
+ In cluster mode, if keys in `keys` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+ Args:
+ keys (List[TEncodable]): A list of keys to retrieve values for.
+
+ Returns:
+ List[Optional[bytes]]: A list of values corresponding to the provided keys. If a key is not found,
+ its corresponding value in the list will be None.
+
+ Examples:
+ >>> client.set("key1", "value1")
+ >>> client.set("key2", "value2")
+ >>> client.mget(["key1", "key2"])
+ [b'value1' , b'value2']
+ """
+ return cast(
+ List[Optional[bytes]], self._execute_command(RequestType.MGet, keys)
+ )
+
+ def decr(self, key: TEncodable) -> int:
+ """
+ Decrement the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the
+ operation.
+ See https://valkey.io/commands/decr/ for more details.
+
+ Args:
+ key (TEncodable): The key to increment its value.
+
+ Returns:
+ int: The value of key after the decrement.
+
+ Examples:
+ >>> client.set("key", "10")
+ >>> client.decr("key")
+ 9
+ """
+ return cast(int, self._execute_command(RequestType.Decr, [key]))
+
+ def decrby(self, key: TEncodable, amount: int) -> int:
+ """
+ Decrements the number stored at `key` by `amount`. If the key does not exist, it is set to 0 before performing
+ the operation.
+ See https://valkey.io/commands/decrby/ for more details.
+
+ Args:
+ key (TEncodable): The key to decrement its value.
+ amount (int) : The amount to decrement.
+
+ Returns:
+ int: The value of key after the decrement.
+
+ Example:
+ >>> client.set("key", "10")
+ >>> client.decrby("key" , 5)
+ 5
+ """
+ return cast(
+ int, self._execute_command(RequestType.DecrBy, [key, str(amount)])
+ )
+
+ def touch(self, keys: List[TEncodable]) -> int:
+ """
+ Updates the last access time of specified keys.
+
+ See https://valkey.io/commands/touch/ for details.
+
+ Note:
+ In cluster mode, if keys in `key_value_map` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity. Args:
+ keys (List[TEncodable]): The list of keys to unlink.
+
+ Args:
+ keys (List[TEncodable]): The keys to update last access time.
+
+ Returns:
+ int: The number of keys that were updated, a key is ignored if it doesn't exist.
+
+ Examples:
+ >>> client.set("myKey1", "value1")
+ >>> client.set("myKey2", "value2")
+ >>> client.touch(["myKey1", "myKey2", "nonExistentKey"])
+ 2 # Last access time of 2 keys has been updated.
+ """
+ return cast(int, self._execute_command(RequestType.Touch, keys))
+
+ def hset(
+ self,
+ key: TEncodable,
+ field_value_map: Mapping[TEncodable, TEncodable],
+ ) -> int:
+ """
+ Sets the specified fields to their respective values in the hash stored at `key`.
+ See https://valkey.io/commands/hset/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field_value_map (Mapping[TEncodable, TEncodable]): A field-value map consisting of fields and their corresponding values
+ to be set in the hash stored at the specified key.
+
+ Returns:
+ int: The number of fields that were added to the hash.
+
+ Example:
+ >>> client.hset("my_hash", {"field": "value", "field2": "value2"})
+ 2 # Indicates that 2 fields were successfully set in the hash "my_hash".
+ """
+ field_value_list: List[TEncodable] = [key]
+ for pair in field_value_map.items():
+ field_value_list.extend(pair)
+ return cast(
+ int,
+ self._execute_command(RequestType.HSet, field_value_list),
+ )
+
+ def hget(self, key: TEncodable, field: TEncodable) -> Optional[bytes]:
+ """
+ Retrieves the value associated with `field` in the hash stored at `key`.
+ See https://valkey.io/commands/hget/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field whose value should be retrieved.
+
+ Returns:
+ Optional[bytes]: The value associated with `field` in the hash.
+ Returns None if `field` is not present in the hash or `key` does not exist.
+
+ Examples:
+ >>> client.hset("my_hash", "field", "value")
+ >>> client.hget("my_hash", "field")
+ b"value"
+ >>> client.hget("my_hash", "nonexistent_field")
+ None
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(RequestType.HGet, [key, field]),
+ )
+
+ def hsetnx(
+ self,
+ key: TEncodable,
+ field: TEncodable,
+ value: TEncodable,
+ ) -> bool:
+ """
+ Sets `field` in the hash stored at `key` to `value`, only if `field` does not yet exist.
+ If `key` does not exist, a new key holding a hash is created.
+ If `field` already exists, this operation has no effect.
+ See https://valkey.io/commands/hsetnx/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field to set the value for.
+ value (TEncodable): The value to set.
+
+ Returns:
+ bool: True if the field was set, False if the field already existed and was not set.
+
+ Examples:
+ >>> client.hsetnx("my_hash", "field", "value")
+ True # Indicates that the field "field" was set successfully in the hash "my_hash".
+ >>> client.hsetnx("my_hash", "field", "new_value")
+ False # Indicates that the field "field" already existed in the hash "my_hash" and was not set again.
+ """
+ return cast(
+ bool,
+ self._execute_command(RequestType.HSetNX, [key, field, value]),
+ )
+
+ def hincrby(self, key: TEncodable, field: TEncodable, amount: int) -> int:
+ """
+ Increment or decrement the value of a `field` in the hash stored at `key` by the specified amount.
+ By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented.
+ If `field` or `key` does not exist, it is set to 0 before performing the operation.
+ See https://valkey.io/commands/hincrby/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field in the hash stored at `key` to increment or decrement its value.
+ amount (int): The amount by which to increment or decrement the field's value.
+ Use a negative value to decrement.
+
+ Returns:
+ int: The value of the specified field in the hash stored at `key` after the increment or decrement.
+
+ Examples:
+ >>> client.hincrby("my_hash", "field1", 5)
+ 5
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.HIncrBy, [key, field, str(amount)]),
+ )
+
+ def hincrbyfloat(
+ self, key: TEncodable, field: TEncodable, amount: float
+ ) -> float:
+ """
+ Increment or decrement the floating-point value stored at `field` in the hash stored at `key` by the specified
+ amount.
+ By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented.
+ If `field` or `key` does not exist, it is set to 0 before performing the operation.
+ See https://valkey.io/commands/hincrbyfloat/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field in the hash stored at `key` to increment or decrement its value.
+ amount (float): The amount by which to increment or decrement the field's value.
+ Use a negative value to decrement.
+
+ Returns:
+ float: The value of the specified field in the hash stored at `key` after the increment or decrement.
+
+ Examples:
+ >>> client.hincrbyfloat("my_hash", "field1", 2.5)
+ 2.5
+ """
+ return cast(
+ float,
+ self._execute_command(
+ RequestType.HIncrByFloat, [key, field, str(amount)]
+ ),
+ )
+
+ def hexists(self, key: TEncodable, field: TEncodable) -> bool:
+ """
+ Check if a field exists in the hash stored at `key`.
+ See https://valkey.io/commands/hexists/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field to check in the hash stored at `key`.
+
+ Returns:
+ bool: Returns 'True' if the hash contains the specified field. If the hash does not contain the field,
+ or if the key does not exist, it returns 'False'.
+
+ Examples:
+ >>> client.hexists("my_hash", "field1")
+ True
+ >>> client.hexists("my_hash", "nonexistent_field")
+ False
+ """
+ return cast(
+ bool, self._execute_command(RequestType.HExists, [key, field])
+ )
+
+ def hgetall(self, key: TEncodable) -> Dict[bytes, bytes]:
+ """
+ Returns all fields and values of the hash stored at `key`.
+ See https://valkey.io/commands/hgetall/ for details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+
+ Returns:
+ Dict[bytes, bytes]: A dictionary of fields and their values stored in the hash, mapping every
+ field name to its value.
+ If `key` does not exist, it returns an empty dictionary.
+
+ Examples:
+ >>> client.hgetall("my_hash")
+ {b"field1": b"value1", b"field2": b"value2"}
+ """
+ return cast(
+ Dict[bytes, bytes], self._execute_command(RequestType.HGetAll, [key])
+ )
+
+ def hmget(
+ self, key: TEncodable, fields: List[TEncodable]
+ ) -> List[Optional[bytes]]:
+ """
+ Retrieve the values associated with specified fields in the hash stored at `key`.
+ See https://valkey.io/commands/hmget/ for details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ fields (List[TEncodable]): The list of fields in the hash stored at `key` to retrieve from the database.
+
+ Returns:
+ List[Optional[bytes]]: A list of values associated with the given fields, in the same order as they are requested.
+ For every field that does not exist in the hash, a null value is returned.
+ If `key` does not exist, it is treated as an empty hash, and the function returns a list of null values.
+
+ Examples:
+ >>> client.hmget("my_hash", ["field1", "field2"])
+ [b"value1", b"value2"] # A list of values associated with the specified fields.
+ """
+ return cast(
+ List[Optional[bytes]],
+ self._execute_command(RequestType.HMGet, [key] + fields),
+ )
+
+ def hdel(self, key: TEncodable, fields: List[TEncodable]) -> int:
+ """
+ Remove specified fields from the hash stored at `key`.
+ See https://valkey.io/commands/hdel/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ fields (List[TEncodable]): The list of fields to remove from the hash stored at `key`.
+
+ Returns:
+ int: The number of fields that were removed from the hash, excluding specified but non-existing fields.
+ If `key` does not exist, it is treated as an empty hash, and the function returns 0.
+
+ Examples:
+ >>> client.hdel("my_hash", ["field1", "field2"])
+ 2 # Indicates that two fields were successfully removed from the hash.
+ """
+ return cast(int, self._execute_command(RequestType.HDel, [key] + fields))
+
+ def hlen(self, key: TEncodable) -> int:
+ """
+ Returns the number of fields contained in the hash stored at `key`.
+
+ See https://valkey.io/commands/hlen/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+
+ Returns:
+ int: The number of fields in the hash, or 0 when the key does not exist.
+ If `key` holds a value that is not a hash, an error is returned.
+
+ Examples:
+ >>> client.hlen("my_hash")
+ 3
+ >>> client.hlen("non_existing_key")
+ 0
+ """
+ return cast(int, self._execute_command(RequestType.HLen, [key]))
+
+ def hvals(self, key: TEncodable) -> List[bytes]:
+ """
+ Returns all values in the hash stored at `key`.
+
+ See https://valkey.io/commands/hvals/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+
+ Returns:
+ List[bytes]: A list of values in the hash, or an empty list when the key does not exist.
+
+ Examples:
+ >>> client.hvals("my_hash")
+ [b"value1", b"value2", b"value3"] # Returns all the values stored in the hash "my_hash".
+ """
+ return cast(List[bytes], self._execute_command(RequestType.HVals, [key]))
+
+ def hkeys(self, key: TEncodable) -> List[bytes]:
+ """
+ Returns all field names in the hash stored at `key`.
+
+ See https://valkey.io/commands/hkeys/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+
+ Returns:
+ List[bytes]: A list of field names for the hash, or an empty list when the key does not exist.
+
+ Examples:
+ >>> client.hkeys("my_hash")
+ [b"field1", b"field2", b"field3"] # Returns all the field names stored in the hash "my_hash".
+ """
+ return cast(List[bytes], self._execute_command(RequestType.HKeys, [key]))
+
+ def hrandfield(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Returns a random field name from the hash value stored at `key`.
+
+ See https://valkey.io/commands/hrandfield for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+
+ Returns:
+ Optional[bytes]: A random field name from the hash stored at `key`.
+ If the hash does not exist or is empty, None will be returned.
+
+ Examples:
+ >>> client.hrandfield("my_hash")
+ b"field1" # A random field name stored in the hash "my_hash".
+ """
+ return cast(
+ Optional[bytes], self._execute_command(RequestType.HRandField, [key])
+ )
+
+ def hrandfield_count(self, key: TEncodable, count: int) -> List[bytes]:
+ """
+ Retrieves up to `count` random field names from the hash value stored at `key`.
+
+ See https://valkey.io/commands/hrandfield for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ count (int): The number of field names to return.
+ If `count` is positive, returns unique elements.
+ If `count` is negative, allows for duplicate elements.
+
+ Returns:
+ List[bytes]: A list of random field names from the hash.
+ If the hash does not exist or is empty, the response will be an empty list.
+
+ Examples:
+ >>> client.hrandfield_count("my_hash", -3)
+ [b"field1", b"field1", b"field2"] # Non-distinct, random field names stored in the hash "my_hash".
+ >>> client.hrandfield_count("non_existing_hash", 3)
+ [] # Empty list
+ """
+ return cast(
+ List[bytes],
+ self._execute_command(RequestType.HRandField, [key, str(count)]),
+ )
+
+ def hrandfield_withvalues(
+ self, key: TEncodable, count: int
+ ) -> List[List[bytes]]:
+ """
+ Retrieves up to `count` random field names along with their values from the hash value stored at `key`.
+
+ See https://valkey.io/commands/hrandfield for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ count (int): The number of field names to return.
+ If `count` is positive, returns unique elements.
+ If `count` is negative, allows for duplicate elements.
+
+ Returns:
+ List[List[bytes]]: A list of `[field_name, value]` lists, where `field_name` is a random field name from the
+ hash and `value` is the associated value of the field name.
+ If the hash does not exist or is empty, the response will be an empty list.
+
+ Examples:
+ >>> client.hrandfield_withvalues("my_hash", -3)
+ [[b"field1", b"value1"], [b"field1", b"value1"], [b"field2", b"value2"]]
+ """
+ return cast(
+ List[List[bytes]],
+ self._execute_command(
+ RequestType.HRandField, [key, str(count), "WITHVALUES"]
+ ),
+ )
+
+ def hstrlen(self, key: TEncodable, field: TEncodable) -> int:
+ """
+ Returns the string length of the value associated with `field` in the hash stored at `key`.
+
+ See https://valkey.io/commands/hstrlen/ for more details.
+
+ Args:
+ key (TEncodable): The key of the hash.
+ field (TEncodable): The field in the hash.
+
+ Returns:
+ int: The string length or 0 if `field` or `key` does not exist.
+
+ Examples:
+ >>> client.hset("my_hash", "field", "value")
+ >>> client.hstrlen("my_hash", "field")
+ 5
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.HStrlen, [key, field]),
+ )
+
+ def lpush(self, key: TEncodable, elements: List[TEncodable]) -> int:
+ """
+ Insert all the specified values at the head of the list stored at `key`.
+ `elements` are inserted one after the other to the head of the list, from the leftmost element
+ to the rightmost element. If `key` does not exist, it is created as empty list before performing the push operations.
+ See https://valkey.io/commands/lpush/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ elements (List[TEncodable]): The elements to insert at the head of the list stored at `key`.
+
+ Returns:
+ int: The length of the list after the push operations.
+
+ Examples:
+ >>> client.lpush("my_list", ["value2", "value3"])
+ 3 # Indicates that the new length of the list is 3 after the push operation.
+ >>> client.lpush("nonexistent_list", ["new_value"])
+ 1
+ """
+ return cast(
+ int, self._execute_command(RequestType.LPush, [key] + elements)
+ )
+
+ def lpushx(self, key: TEncodable, elements: List[TEncodable]) -> int:
+ """
+ Inserts all the specified values at the head of the list stored at `key`, only if `key` exists and holds a list.
+ If `key` is not a list, this performs no operation.
+
+ See https://valkey.io/commands/lpushx/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ elements (List[TEncodable]): The elements to insert at the head of the list stored at `key`.
+
+ Returns:
+ int: The length of the list after the push operation.
+
+ Examples:
+ >>> client.lpushx("my_list", ["value1", "value2"])
+ 3 # Indicates that 2 elements were added to the list "my_list", and the new length of the list is 3.
+ >>> client.lpushx("nonexistent_list", ["new_value"])
+ 0 # Indicates that the list "nonexistent_list" does not exist, so "new_value" could not be pushed.
+ """
+ return cast(
+ int, self._execute_command(RequestType.LPushX, [key] + elements)
+ )
+
+ def lpop(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Remove and return the first elements of the list stored at `key`.
+ The command pops a single element from the beginning of the list.
+ See https://valkey.io/commands/lpop/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+
+ Returns:
+ Optional[bytes]: The value of the first element.
+ If `key` does not exist, None will be returned.
+
+ Examples:
+ >>> client.lpop("my_list")
+ b"value1"
+ >>> client.lpop("non_existing_key")
+ None
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(RequestType.LPop, [key]),
+ )
+
+ def lpop_count(self, key: TEncodable, count: int) -> Optional[List[bytes]]:
+ """
+ Remove and return up to `count` elements from the list stored at `key`, depending on the list's length.
+ See https://valkey.io/commands/lpop/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ count (int): The count of elements to pop from the list.
+
+ Returns:
+ Optional[List[bytes]]: A list of popped elements will be returned depending on the list's length.
+ If `key` does not exist, None will be returned.
+
+ Examples:
+ >>> client.lpop_count("my_list", 2)
+ [b"value1", b"value2"]
+ >>> client.lpop_count("non_existing_key", 3)
+ None
+ """
+ return cast(
+ Optional[List[bytes]],
+ self._execute_command(RequestType.LPop, [key, str(count)]),
+ )
+
+ def blpop(
+ self, keys: List[TEncodable], timeout: float
+ ) -> Optional[List[bytes]]:
+ """
+ Pops an element from the head of the first list that is non-empty, with the given keys being checked in the
+ order that they are given. Blocks the connection when there are no elements to pop from any of the given lists.
+ See https://valkey.io/commands/blpop for details.
+
+ Notes:
+ 1. When in cluster mode, all `keys` must map to the same hash slot.
+ 2. `BLPOP` is a client blocking command, see https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands for more details and best practices.
+
+ Args:
+ keys (List[TEncodable]): The keys of the lists to pop from.
+ timeout (float): The number of seconds to wait for a blocking operation to complete. A value of 0 will block indefinitely.
+
+ Returns:
+ Optional[List[bytes]]: A two-element list containing the key from which the element was popped and the value of the
+ popped element, formatted as `[key, value]`. If no element could be popped and the `timeout` expired, returns None.
+
+ Examples:
+ >>> client.blpop(["list1", "list2"], 0.5)
+ [b"list1", b"element"] # "element" was popped from the head of the list with key "list1"
+ """
+ return cast(
+ Optional[List[bytes]],
+ self._execute_command(RequestType.BLPop, keys + [str(timeout)]),
+ )
+
+ def lmpop(
+ self,
+ keys: List[TEncodable],
+ direction: ListDirection,
+ count: Optional[int] = None,
+ ) -> Optional[Mapping[bytes, List[bytes]]]:
+ """
+ Pops one or more elements from the first non-empty list from the provided `keys`.
+
+ When in cluster mode, all `keys` must map to the same hash slot.
+
+ See https://valkey.io/commands/lmpop/ for details.
+
+ Args:
+ keys (List[TEncodable]): An array of keys of lists.
+ direction (ListDirection): The direction based on which elements are popped from (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+ count (Optional[int]): The maximum number of popped elements. If not provided, defaults to popping a single element.
+
+ Returns:
+ Optional[Mapping[bytes, List[bytes]]]: A map of `key` name mapped to an array of popped elements, or None if no elements could be popped.
+
+ Examples:
+ >>> client.lpush("testKey", ["one", "two", "three"])
+ >>> client.lmpop(["testKey"], ListDirection.LEFT, 2)
+ {b"testKey": [b"three", b"two"]}
+
+ Since: Valkey version 7.0.0.
+ """
+ args = [str(len(keys)), *keys, direction.value]
+ if count is not None:
+ args += ["COUNT", str(count)]
+
+ return cast(
+ Optional[Mapping[bytes, List[bytes]]],
+ self._execute_command(RequestType.LMPop, args),
+ )
+
+ def blmpop(
+ self,
+ keys: List[TEncodable],
+ direction: ListDirection,
+ timeout: float,
+ count: Optional[int] = None,
+ ) -> Optional[Mapping[bytes, List[bytes]]]:
+ """
+ Blocks the connection until it pops one or more elements from the first non-empty list from the provided `keys`.
+
+ `BLMPOP` is the blocking variant of `LMPOP`.
+
+ Notes:
+ 1. When in cluster mode, all `keys` must map to the same hash slot.
+ 2. `BLMPOP` is a client blocking command, see https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands for more details and best practices.
+
+ See https://valkey.io/commands/blmpop/ for details.
+
+ Args:
+ keys (List[TEncodable]): An array of keys of lists.
+ direction (ListDirection): The direction based on which elements are popped from (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+ timeout (float): The number of seconds to wait for a blocking operation to complete. A value of `0` will block indefinitely.
+ count (Optional[int]): The maximum number of popped elements. If not provided, defaults to popping a single element.
+
+ Returns:
+ Optional[Mapping[bytes, List[bytes]]]: A map of `key` name mapped to an array of popped elements, or None if no elements could be popped and the timeout expired.
+
+ Examples:
+ >>> client.lpush("testKey", ["one", "two", "three"])
+ >>> client.blmpop(["testKey"], ListDirection.LEFT, 0.1, 2)
+ {b"testKey": [b"three", b"two"]}
+
+ Since: Valkey version 7.0.0.
+ """
+ args = [str(timeout), str(len(keys)), *keys, direction.value]
+ if count is not None:
+ args += ["COUNT", str(count)]
+
+ return cast(
+ Optional[Mapping[bytes, List[bytes]]],
+ self._execute_command(RequestType.BLMPop, args),
+ )
+
+ def lrange(self, key: TEncodable, start: int, end: int) -> List[bytes]:
+ """
+ Retrieve the specified elements of the list stored at `key` within the given range.
+ The offsets `start` and `end` are zero-based indexes, with 0 being the first element of the list, 1 being the next
+ element and so on. These offsets can also be negative numbers indicating offsets starting at the end of the list,
+ with -1 being the last element of the list, -2 being the penultimate, and so on.
+ See https://valkey.io/commands/lrange/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ start (int): The starting point of the range.
+ end (int): The end of the range.
+
+ Returns:
+ List[bytes]: A list of elements within the specified range.
+ If `start` exceeds the `end` of the list, or if `start` is greater than `end`, an empty list will be returned.
+ If `end` exceeds the actual end of the list, the range will stop at the actual end of the list.
+ If `key` does not exist an empty list will be returned.
+
+ Examples:
+ >>> client.lrange("my_list", 0, 2)
+ [b"value1", b"value2", b"value3"]
+ >>> client.lrange("my_list", -2, -1)
+ [b"value2", b"value3"]
+ >>> client.lrange("non_existing_key", 0, 2)
+ []
+ """
+ return cast(
+ List[bytes],
+ self._execute_command(
+ RequestType.LRange, [key, str(start), str(end)]
+ ),
+ )
+
+ def lindex(
+ self,
+ key: TEncodable,
+ index: int,
+ ) -> Optional[bytes]:
+ """
+ Returns the element at `index` in the list stored at `key`.
+
+ The index is zero-based, so 0 means the first element, 1 the second element and so on.
+ Negative indices can be used to designate elements starting at the tail of the list.
+ Here, -1 means the last element, -2 means the penultimate and so forth.
+
+ See https://valkey.io/commands/lindex/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ index (int): The index of the element in the list to retrieve.
+
+ Returns:
+ Optional[bytes]: The element at `index` in the list stored at `key`.
+ If `index` is out of range or if `key` does not exist, None is returned.
+
+ Examples:
+ >>> client.lindex("my_list", 0)
+ b'value1' # Returns the first element in the list stored at 'my_list'.
+ >>> client.lindex("my_list", -1)
+ b'value3' # Returns the last element in the list stored at 'my_list'.
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(RequestType.LIndex, [key, str(index)]),
+ )
+
+ def lset(self, key: TEncodable, index: int, element: TEncodable) -> TOK:
+ """
+ Sets the list element at `index` to `element`.
+
+ The index is zero-based, so `0` means the first element, `1` the second element and so on.
+ Negative indices can be used to designate elements starting at the tail of the list.
+ Here, `-1` means the last element, `-2` means the penultimate and so forth.
+
+ See https://valkey.io/commands/lset/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ index (int): The index of the element in the list to be set.
+ element (TEncodable): The new element to set at the specified index.
+
+ Returns:
+ TOK: A simple `OK` response.
+
+ Examples:
+ >>> client.lset("testKey", 1, "two")
+ OK
+ """
+ return cast(
+ TOK,
+ self._execute_command(RequestType.LSet, [key, str(index), element]),
+ )
+
+ def rpush(self, key: TEncodable, elements: List[TEncodable]) -> int:
+ """
+ Inserts all the specified values at the tail of the list stored at `key`.
+ `elements` are inserted one after the other to the tail of the list, from the leftmost element
+ to the rightmost element. If `key` does not exist, it is created as empty list before performing the push operations.
+ See https://valkey.io/commands/rpush/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ elements (List[TEncodable]): The elements to insert at the tail of the list stored at `key`.
+
+ Returns:
+ int: The length of the list after the push operations.
+
+ Examples:
+ >>> client.rpush("my_list", ["value2", "value3"])
+ 3 # Indicates that the new length of the list is 3 after the push operation.
+ >>> client.rpush("nonexistent_list", ["new_value"])
+ 1
+ """
+ return cast(
+ int, self._execute_command(RequestType.RPush, [key] + elements)
+ )
+
+ def rpushx(self, key: TEncodable, elements: List[TEncodable]) -> int:
+ """
+ Inserts all the specified values at the tail of the list stored at `key`, only if `key` exists and holds a list.
+ If `key` is not a list, this performs no operation.
+
+ See https://valkey.io/commands/rpushx/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ elements (List[TEncodable]): The elements to insert at the tail of the list stored at `key`.
+
+ Returns:
+ int: The length of the list after the push operation.
+
+ Examples:
+ >>> client.rpushx("my_list", ["value1", "value2"])
+ 3 # Indicates that 2 elements were added to the list "my_list", and the new length of the list is 3.
+ >>> client.rpushx("nonexistent_list", ["new_value"])
+ 0 # Indicates that the list "nonexistent_list" does not exist, so "new_value" could not be pushed.
+ """
+ return cast(
+ int, self._execute_command(RequestType.RPushX, [key] + elements)
+ )
+
+ def rpop(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Removes and returns the last elements of the list stored at `key`.
+ The command pops a single element from the end of the list.
+ See https://valkey.io/commands/rpop/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+
+ Returns:
+ Optional[bytes]: The value of the last element.
+ If `key` does not exist, None will be returned.
+
+ Examples:
+ >>> client.rpop("my_list")
+ b"value1"
+ >>> client.rpop("non_existing_key")
+ None
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(RequestType.RPop, [key]),
+ )
+
+ def rpop_count(self, key: TEncodable, count: int) -> Optional[List[bytes]]:
+ """
+ Removes and returns up to `count` elements from the list stored at `key`, depending on the list's length.
+ See https://valkey.io/commands/rpop/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ count (int): The count of elements to pop from the list.
+
+ Returns:
+ Optional[List[bytes]]: A list of popped elements will be returned depending on the list's length.
+ If `key` does not exist, None will be returned.
+
+ Examples:
+ >>> client.rpop_count("my_list", 2)
+ [b"value1", b"value2"]
+ >>> client.rpop_count("non_existing_key", 7)
+ None
+ """
+ return cast(
+ Optional[List[bytes]],
+ self._execute_command(RequestType.RPop, [key, str(count)]),
+ )
+
+ def brpop(
+ self, keys: List[TEncodable], timeout: float
+ ) -> Optional[List[bytes]]:
+ """
+ Pops an element from the tail of the first list that is non-empty, with the given keys being checked in the
+ order that they are given. Blocks the connection when there are no elements to pop from any of the given lists.
+ See https://valkey.io/commands/brpop for details.
+
+ Notes:
+ 1. When in cluster mode, all `keys` must map to the same hash slot.
+ 2. `BRPOP` is a client blocking command, see https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands for more details and best practices.
+
+ Args:
+ keys (List[TEncodable]): The keys of the lists to pop from.
+ timeout (float): The number of seconds to wait for a blocking operation to complete. A value of 0 will block indefinitely.
+
+ Returns:
+ Optional[List[bytes]]: A two-element list containing the key from which the element was popped and the value of the
+ popped element, formatted as `[key, value]`. If no element could be popped and the `timeout` expired, returns None.
+
+ Examples:
+ >>> client.brpop(["list1", "list2"], 0.5)
+ [b"list1", b"element"] # "element" was popped from the tail of the list with key "list1"
+ """
+ return cast(
+ Optional[List[bytes]],
+ self._execute_command(RequestType.BRPop, keys + [str(timeout)]),
+ )
+
+ def linsert(
+ self,
+ key: TEncodable,
+ position: InsertPosition,
+ pivot: TEncodable,
+ element: TEncodable,
+ ) -> int:
+ """
+ Inserts `element` in the list at `key` either before or after the `pivot`.
+
+ See https://valkey.io/commands/linsert/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ position (InsertPosition): The relative position to insert into - either `InsertPosition.BEFORE` or
+ `InsertPosition.AFTER` the `pivot`.
+ pivot (TEncodable): An element of the list.
+ element (TEncodable): The new element to insert.
+
+ Returns:
+ int: The list length after a successful insert operation.
+ If the `key` doesn't exist, returns `0`.
+ If the `pivot` wasn't found, returns `-1`.
+
+ Examples:
+ >>> client.linsert("my_list", InsertPosition.BEFORE, "World", "There")
+ 3 # "There" was inserted before "World", and the new length of the list is 3.
+ """
+ return cast(
+ int,
+ self._execute_command(
+ RequestType.LInsert, [key, position.value, pivot, element]
+ ),
+ )
+
+ def lmove(
+ self,
+ source: TEncodable,
+ destination: TEncodable,
+ where_from: ListDirection,
+ where_to: ListDirection,
+ ) -> Optional[bytes]:
+ """
+ Atomically pops and removes the left/right-most element of the list stored at `source`
+ depending on `where_from`, and pushes the element at the first/last element of the list
+ stored at `destination` depending on `where_to`.
+
+ When in cluster mode, both `source` and `destination` must map to the same hash slot.
+
+ See https://valkey.io/commands/lmove/ for details.
+
+ Args:
+ source (TEncodable): The key to the source list.
+ destination (TEncodable): The key to the destination list.
+ where_from (ListDirection): The direction to remove the element from (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+ where_to (ListDirection): The direction to add the element to (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+
+ Returns:
+ Optional[bytes]: The popped element, or None if `source` does not exist.
+
+ Examples:
+ >>> client.lpush("testKey1", ["two", "one"])
+ >>> client.lpush("testKey2", ["four", "three"])
+ >>> client.lmove("testKey1", "testKey2", ListDirection.LEFT, ListDirection.LEFT)
+ b"one"
+ >>> updated_array1 = client.lrange("testKey1", 0, -1)
+ [b"two"]
+ >>> client.lrange("testKey2", 0, -1)
+ [b"one", b"three", b"four"]
+
+ Since: Valkey version 6.2.0.
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(
+ RequestType.LMove,
+ [source, destination, where_from.value, where_to.value],
+ ),
+ )
+
+ def blmove(
+ self,
+ source: TEncodable,
+ destination: TEncodable,
+ where_from: ListDirection,
+ where_to: ListDirection,
+ timeout: float,
+ ) -> Optional[bytes]:
+ """
+ Blocks the connection until it atomically pops and removes the left/right-most element of the
+ list stored at `source` depending on `where_from`, and pushes the element at the first/last element
+ of the list stored at `destination` depending on `where_to`.
+ `BLMOVE` is the blocking variant of `LMOVE`.
+
+ Notes:
+ 1. When in cluster mode, both `source` and `destination` must map to the same hash slot.
+ 2. `BLMOVE` is a client blocking command, see https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands for more details and best practices.
+
+ See https://valkey.io/commands/blmove/ for details.
+
+ Args:
+ source (TEncodable): The key to the source list.
+ destination (TEncodable): The key to the destination list.
+ where_from (ListDirection): The direction to remove the element from (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+ where_to (ListDirection): The direction to add the element to (`ListDirection.LEFT` or `ListDirection.RIGHT`).
+ timeout (float): The number of seconds to wait for a blocking operation to complete. A value of `0` will block indefinitely.
+
+ Returns:
+ Optional[bytes]: The popped element, or None if `source` does not exist or if the operation timed-out.
+
+ Examples:
+ >>> client.lpush("testKey1", ["two", "one"])
+ >>> client.lpush("testKey2", ["four", "three"])
+ >>> client.blmove("testKey1", "testKey2", ListDirection.LEFT, ListDirection.LEFT, 0.1)
+ b"one"
+ >>> client.lrange("testKey1", 0, -1)
+ [b"two"]
+ >>> updated_array2 = client.lrange("testKey2", 0, -1)
+ [b"one", b"three", b"four"]
+
+ Since: Valkey version 6.2.0.
+ """
+ return cast(
+ Optional[bytes],
+ self._execute_command(
+ RequestType.BLMove,
+ [source, destination, where_from.value, where_to.value, str(timeout)],
+ ),
+ )
+
+ def sadd(self, key: TEncodable, members: List[TEncodable]) -> int:
+ """
+ Add specified members to the set stored at `key`.
+ Specified members that are already a member of this set are ignored.
+ If `key` does not exist, a new set is created before adding `members`.
+ See https://valkey.io/commands/sadd/ for more details.
+
+ Args:
+ key (TEncodable): The key where members will be added to its set.
+ members (List[TEncodable]): A list of members to add to the set stored at `key`.
+
+ Returns:
+ int: The number of members that were added to the set, excluding members already present.
+
+ Examples:
+ >>> client.sadd("my_set", ["member1", "member2"])
+ 2
+ """
+ return cast(int, self._execute_command(RequestType.SAdd, [key] + members))
+
+ def srem(self, key: TEncodable, members: List[TEncodable]) -> int:
+ """
+ Remove specified members from the set stored at `key`.
+ Specified members that are not a member of this set are ignored.
+ See https://valkey.io/commands/srem/ for details.
+
+ Args:
+ key (TEncodable): The key from which members will be removed.
+ members (List[TEncodable]): A list of members to remove from the set stored at `key`.
+
+ Returns:
+ int: The number of members that were removed from the set, excluding non-existing members.
+ If `key` does not exist, it is treated as an empty set and this command returns 0.
+
+ Examples:
+ >>> client.srem("my_set", ["member1", "member2"])
+ 2
+ """
+ return cast(int, self._execute_command(RequestType.SRem, [key] + members))
+
+ def smembers(self, key: TEncodable) -> Set[bytes]:
+ """
+ Retrieve all the members of the set value stored at `key`.
+ See https://valkey.io/commands/smembers/ for details.
+
+ Args:
+ key (TEncodable): The key from which to retrieve the set members.
+
+ Returns:
+ Set[bytes]: A set of all members of the set.
+ If `key` does not exist an empty set will be returned.
+
+ Examples:
+ >>> client.smembers("my_set")
+ {b"member1", b"member2", b"member3"}
+ """
+ return cast(
+ Set[bytes], self._execute_command(RequestType.SMembers, [key])
+ )
+
+ def scard(self, key: TEncodable) -> int:
+ """
+ Retrieve the set cardinality (number of elements) of the set stored at `key`.
+ See https://valkey.io/commands/scard/ for details.
+
+ Args:
+ key (TEncodable): The key from which to retrieve the number of set members.
+
+ Returns:
+ int: The cardinality (number of elements) of the set, or 0 if the key does not exist.
+
+ Examples:
+ >>> client.scard("my_set")
+ 3
+ """
+ return cast(int, self._execute_command(RequestType.SCard, [key]))
+
+ def spop(self, key: TEncodable) -> Optional[bytes]:
+ """
+ Removes and returns one random member from the set stored at `key`.
+
+ See https://valkey-io.github.io/commands/spop/ for more details.
+ To pop multiple members, see `spop_count`.
+
+ Args:
+ key (TEncodable): The key of the set.
+
+ Returns:
+ Optional[bytes]: The value of the popped member.
+ If `key` does not exist, None will be returned.
+
+ Examples:
+ >>> client.spop("my_set")
+ b"value1" # Removes and returns a random member from the set "my_set".
+ >>> client.spop("non_exiting_key")
+ None
+ """
+ return cast(
+ Optional[bytes], self._execute_command(RequestType.SPop, [key])
+ )
+
+ def spop_count(self, key: TEncodable, count: int) -> Set[bytes]:
+ """
+ Removes and returns up to `count` random members from the set stored at `key`, depending on the set's length.
+
+ See https://valkey-io.github.io/commands/spop/ for more details.
+ To pop a single member, see `spop`.
+
+ Args:
+ key (TEncodable): The key of the set.
+ count (int): The count of the elements to pop from the set.
+
+ Returns:
+ Set[bytes]: A set of popped elements will be returned depending on the set's length.
+ If `key` does not exist, an empty set will be returned.
+
+ Examples:
+ >>> client.spop_count("my_set", 2)
+ {b"value1", b"value2"} # Removes and returns 2 random members from the set "my_set".
+ >>> client.spop_count("non_exiting_key", 2)
+ Set()
+ """
+ return cast(
+ Set[bytes], self._execute_command(RequestType.SPop, [key, str(count)])
+ )
+
+ def sismember(
+ self,
+ key: TEncodable,
+ member: TEncodable,
+ ) -> bool:
+ """
+ Returns if `member` is a member of the set stored at `key`.
+
+ See https://valkey.io/commands/sismember/ for more details.
+
+ Args:
+ key (TEncodable): The key of the set.
+ member (TEncodable): The member to check for existence in the set.
+
+ Returns:
+ bool: True if the member exists in the set, False otherwise.
+ If `key` doesn't exist, it is treated as an empty set and the command returns False.
+
+ Examples:
+ >>> client.sismember("my_set", "member1")
+ True # Indicates that "member1" exists in the set "my_set".
+ >>> client.sismember("my_set", "non_existing_member")
+ False # Indicates that "non_existing_member" does not exist in the set "my_set".
+ """
+ return cast(
+ bool,
+ self._execute_command(RequestType.SIsMember, [key, member]),
+ )
+
+ def smove(
+ self,
+ source: TEncodable,
+ destination: TEncodable,
+ member: TEncodable,
+ ) -> bool:
+ """
+ Moves `member` from the set at `source` to the set at `destination`, removing it from the source set. Creates a
+ new destination set if needed. The operation is atomic.
+
+ See https://valkey.io/commands/smove for more details.
+
+ Note:
+ When in cluster mode, `source` and `destination` must map to the same hash slot.
+
+ Args:
+ source (TEncodable): The key of the set to remove the element from.
+ destination (TEncodable): The key of the set to add the element to.
+ member (TEncodable): The set element to move.
+
+ Returns:
+ bool: True on success, or False if the `source` set does not exist or the element is not a member of the source set.
+
+ Examples:
+ >>> client.smove("set1", "set2", "member1")
+ True # "member1" was moved from "set1" to "set2".
+ """
+ return cast(
+ bool,
+ self._execute_command(
+ RequestType.SMove, [source, destination, member]
+ ),
+ )
+
+ def sunion(self, keys: List[TEncodable]) -> Set[bytes]:
+ """
+ Gets the union of all the given sets.
+
+ See https://valkey.io/commands/sunion for more details.
+
+ Note:
+ When in cluster mode, all `keys` must map to the same hash slot.
+
+ Args:
+ keys (List[TEncodable]): The keys of the sets.
+
+ Returns:
+ Set[bytes]: A set of members which are present in at least one of the given sets.
+ If none of the sets exist, an empty set will be returned.
+
+ Examples:
+ >>> client.sadd("my_set1", ["member1", "member2"])
+ >>> client.sadd("my_set2", ["member2", "member3"])
+ >>> client.sunion(["my_set1", "my_set2"])
+ {b"member1", b"member2", b"member3"} # sets "my_set1" and "my_set2" have three unique members
+ >>> client.sunion(["my_set1", "non_existing_set"])
+ {b"member1", b"member2"}
+ """
+ return cast(Set[bytes], self._execute_command(RequestType.SUnion, keys))
+
+ def sunionstore(
+ self,
+ destination: TEncodable,
+ keys: List[TEncodable],
+ ) -> int:
+ """
+ Stores the members of the union of all given sets specified by `keys` into a new set at `destination`.
+
+ See https://valkey.io/commands/sunionstore for more details.
+
+ Note:
+ When in cluster mode, all keys in `keys` and `destination` must map to the same hash slot.
+
+ Args:
+ destination (TEncodable): The key of the destination set.
+ keys (List[TEncodable]): The keys from which to retrieve the set members.
+
+ Returns:
+ int: The number of elements in the resulting set.
+
+ Examples:
+ >>> client.sadd("set1", ["member1"])
+ >>> client.sadd("set2", ["member2"])
+ >>> client.sunionstore("my_set", ["set1", "set2"])
+ 2 # Two elements were stored in "my_set", and those two members are the union of "set1" and "set2".
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.SUnionStore, [destination] + keys),
+ )
+
+ def sdiffstore(self, destination: TEncodable, keys: List[TEncodable]) -> int:
+ """
+ Stores the difference between the first set and all the successive sets in `keys` into a new set at
+ `destination`.
+
+ See https://valkey.io/commands/sdiffstore for more details.
+
+ Note:
+ When in Cluster mode, all keys in `keys` and `destination` must map to the same hash slot.
+
+ Args:
+ destination (TEncodable): The key of the destination set.
+ keys (List[TEncodable]): The keys of the sets to diff.
+
+ Returns:
+ int: The number of elements in the resulting set.
+
+ Examples:
+ >>> client.sadd("set1", ["member1", "member2"])
+ >>> client.sadd("set2", ["member1"])
+ >>> client.sdiffstore("set3", ["set1", "set2"])
+ 1 # Indicates that one member was stored in "set3", and that member is the diff between "set1" and "set2".
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.SDiffStore, [destination] + keys),
+ )
+
+ def sinter(self, keys: List[TEncodable]) -> Set[bytes]:
+ """
+ Gets the intersection of all the given sets.
+
+ See https://valkey.io/commands/sinter for more details.
+
+ Note:
+ When in cluster mode, all `keys` must map to the same hash slot.
+
+ Args:
+ keys (List[TEncodable]): The keys of the sets.
+
+ Returns:
+ Set[bytes]: A set of members which are present in all given sets.
+ If one or more sets do no exist, an empty set will be returned.
+
+ Examples:
+ >>> client.sadd("my_set1", ["member1", "member2"])
+ >>> client.sadd("my_set2", ["member2", "member3"])
+ >>> client.sinter(["my_set1", "my_set2"])
+ {b"member2"} # sets "my_set1" and "my_set2" have one commom member
+ >>> client.sinter([my_set1", "non_existing_set"])
+ None
+ """
+ return cast(Set[bytes], self._execute_command(RequestType.SInter, keys))
+
+ def sinterstore(self, destination: TEncodable, keys: List[TEncodable]) -> int:
+ """
+ Stores the members of the intersection of all given sets specified by `keys` into a new set at `destination`.
+
+ See https://valkey.io/commands/sinterstore for more details.
+
+ Note:
+ When in Cluster mode, all `keys` and `destination` must map to the same hash slot.
+
+ Args:
+ destination (TEncodable): The key of the destination set.
+ keys (List[TEncodable]): The keys from which to retrieve the set members.
+
+ Returns:
+ int: The number of elements in the resulting set.
+
+ Examples:
+ >>> client.sadd("my_set1", ["member1", "member2"])
+ >>> client.sadd("my_set2", ["member2", "member3"])
+ >>> client.sinterstore("my_set3", ["my_set1", "my_set2"])
+ 1 # One element was stored at "my_set3", and that element is the intersection of "my_set1" and "myset2".
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.SInterStore, [destination] + keys),
+ )
+
+ def sintercard(
+ self, keys: List[TEncodable], limit: Optional[int] = None
+ ) -> int:
+ """
+ Gets the cardinality of the intersection of all the given sets.
+ Optionally, a `limit` can be specified to stop the computation early if the intersection cardinality reaches the specified limit.
+
+ When in cluster mode, all keys in `keys` must map to the same hash slot.
+
+ See https://valkey.io/commands/sintercard for more details.
+
+ Args:
+ keys (List[TEncodable]): A list of keys representing the sets to intersect.
+ limit (Optional[int]): An optional limit to the maximum number of intersecting elements to count.
+ If specified, the computation stops as soon as the cardinality reaches this limit.
+
+ Returns:
+ int: The number of elements in the resulting set of the intersection.
+
+ Examples:
+ >>> client.sadd("set1", {"a", "b", "c"})
+ >>> client.sadd("set2", {"b", "c", "d"})
+ >>> client.sintercard(["set1", "set2"])
+ 2 # The intersection of "set1" and "set2" contains 2 elements: "b" and "c".
+
+ >>> client.sintercard(["set1", "set2"], limit=1)
+ 1 # The computation stops early as the intersection cardinality reaches the limit of 1.
+ """
+ args: List[TEncodable] = [str(len(keys))]
+ args.extend(keys)
+ if limit is not None:
+ args += ["LIMIT", str(limit)]
+ return cast(
+ int,
+ self._execute_command(RequestType.SInterCard, args),
+ )
+
+ def sdiff(self, keys: List[TEncodable]) -> Set[bytes]:
+ """
+ Computes the difference between the first set and all the successive sets in `keys`.
+
+ See https://valkey.io/commands/sdiff for more details.
+
+ Note:
+ When in cluster mode, all `keys` must map to the same hash slot.
+
+ Args:
+ keys (List[TEncodable]): The keys of the sets to diff
+
+ Returns:
+ Set[bytes]: A set of elements representing the difference between the sets.
+ If any of the keys in `keys` do not exist, they are treated as empty sets.
+
+ Examples:
+ >>> client.sadd("set1", ["member1", "member2"])
+ >>> client.sadd("set2", ["member1"])
+ >>> client.sdiff("set1", "set2")
+ {b"member2"} # "member2" is in "set1" but not "set2"
+ """
+ return cast(
+ Set[bytes],
+ self._execute_command(RequestType.SDiff, keys),
+ )
+
+ def smismember(
+ self, key: TEncodable, members: List[TEncodable]
+ ) -> List[bool]:
+ """
+ Checks whether each member is contained in the members of the set stored at `key`.
+
+ See https://valkey.io/commands/smismember for more details.
+
+ Args:
+ key (TEncodable): The key of the set to check.
+ members (List[TEncodable]): A list of members to check for existence in the set.
+
+ Returns:
+ List[bool]: A list of bool values, each indicating if the respective member exists in the set.
+
+ Examples:
+ >>> client.sadd("set1", ["a", "b", "c"])
+ >>> client.smismember("set1", ["b", "c", "d"])
+ [True, True, False] # "b" and "c" are members of "set1", but "d" is not.
+ """
+ return cast(
+ List[bool],
+ self._execute_command(RequestType.SMIsMember, [key] + members),
+ )
+
+ def ltrim(self, key: TEncodable, start: int, end: int) -> TOK:
+ """
+ Trim an existing list so that it will contain only the specified range of elements specified.
+ The offsets `start` and `end` are zero-based indexes, with 0 being the first element of the list, 1 being the next
+ element and so on.
+ These offsets can also be negative numbers indicating offsets starting at the end of the list, with -1 being the last
+ element of the list, -2 being the penultimate, and so on.
+ See https://valkey.io/commands/ltrim/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ start (int): The starting point of the range.
+ end (int): The end of the range.
+
+ Returns:
+ TOK: A simple "OK" response.
+ If `start` exceeds the end of the list, or if `start` is greater than `end`, the result will be an empty list
+ (which causes `key` to be removed).
+ If `end` exceeds the actual end of the list, it will be treated like the last element of the list.
+ If `key` does not exist, "OK" will be returned without changes to the database.
+
+ Examples:
+ >>> client.ltrim("my_list", 0, 1)
+ "OK" # Indicates that the list has been trimmed to contain elements from 0 to 1.
+ """
+ return cast(
+ TOK,
+ self._execute_command(RequestType.LTrim, [key, str(start), str(end)]),
+ )
+
+ def lrem(self, key: TEncodable, count: int, element: TEncodable) -> int:
+ """
+ Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`.
+ If `count` is positive, it removes elements equal to `element` moving from head to tail.
+ If `count` is negative, it removes elements equal to `element` moving from tail to head.
+ If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
+ equal to `element`.
+ See https://valkey.io/commands/lrem/ for more details.
+
+ Args:
+ key (TEncodable): The key of the list.
+ count (int): The count of occurrences of elements equal to `element` to remove.
+ element (TEncodable): The element to remove from the list.
+
+ Returns:
+ int: The number of removed elements.
+ If `key` does not exist, 0 is returned.
+
+ Examples:
+ >>> client.lrem("my_list", 2, "value")
+ 2 # Removes the first 2 occurrences of "value" in the list.
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.LRem, [key, str(count), element]),
+ )
+
+ def llen(self, key: TEncodable) -> int:
+ """
+ Get the length of the list stored at `key`.
+ See https://valkey.io/commands/llen/ for details.
+
+ Args:
+ key (TEncodable): The key of the list.
+
+ Returns:
+ int: The length of the list at the specified key.
+ If `key` does not exist, it is interpreted as an empty list and 0 is returned.
+
+ Examples:
+ >>> client.llen("my_list")
+ 3 # Indicates that there are 3 elements in the list.
+ """
+ return cast(int, self._execute_command(RequestType.LLen, [key]))
+
+ def exists(self, keys: List[TEncodable]) -> int:
+ """
+ Returns the number of keys in `keys` that exist in the database.
+ See https://valkey.io/commands/exists/ for more details.
+
+ Note:
+ In cluster mode, if keys in `keys` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+
+ Args:
+ keys (List[TEncodable]): The list of keys to check.
+
+ Returns:
+ int: The number of keys that exist. If the same existing key is mentioned in `keys` multiple times,
+ it will be counted multiple times.
+
+ Examples:
+ >>> client.exists(["key1", "key2", "key3"])
+ 3 # Indicates that all three keys exist in the database.
+ """
+ return cast(int, self._execute_command(RequestType.Exists, keys))
+
+ def unlink(self, keys: List[TEncodable]) -> int:
+ """
+ Unlink (delete) multiple keys from the database.
+ A key is ignored if it does not exist.
+ This command, similar to DEL, removes specified keys and ignores non-existent ones.
+ However, this command does not block the server, while [DEL](https://valkey.io/commands/del) does.
+ See https://valkey.io/commands/unlink/ for more details.
+
+ Note:
+ In cluster mode, if keys in `key_value_map` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+
+ Args:
+ keys (List[TEncodable]): The list of keys to unlink.
+
+ Returns:
+ int: The number of keys that were unlinked.
+
+ Examples:
+ >>> client.unlink(["key1", "key2", "key3"])
+ 3 # Indicates that all three keys were unlinked from the database.
+ """
+ return cast(int, self._execute_command(RequestType.Unlink, keys))
+
+ def expire(
+ self,
+ key: TEncodable,
+ seconds: int,
+ option: Optional[ExpireOptions] = None,
+ ) -> bool:
+ """
+ Sets a timeout on `key` in seconds. After the timeout has expired, the key will automatically be deleted.
+ If `key` already has an existing expire set, the time to live is updated to the new value.
+ If `seconds` is a non-positive number, the key will be deleted rather than expired.
+ The timeout will only be cleared by commands that delete or overwrite the contents of `key`.
+ See https://valkey.io/commands/expire/ for more details.
+
+ Args:
+ key (TEncodable): The key to set a timeout on.
+ seconds (int): The timeout in seconds.
+ option (ExpireOptions, optional): The expire option.
+
+ Returns:
+ bool: 'True' if the timeout was set, 'False' if the timeout was not set (e.g., the key doesn't exist or the operation is
+ skipped due to the provided arguments).
+
+ Examples:
+ >>> client.expire("my_key", 60)
+ True # Indicates that a timeout of 60 seconds has been set for "my_key."
+ """
+ args: List[TEncodable] = (
+ [key, str(seconds)] if option is None else [key, str(seconds), option.value]
+ )
+ return cast(bool, self._execute_command(RequestType.Expire, args))
+
+ def expireat(
+ self,
+ key: TEncodable,
+ unix_seconds: int,
+ option: Optional[ExpireOptions] = None,
+ ) -> bool:
+ """
+ Sets a timeout on `key` using an absolute Unix timestamp (seconds since January 1, 1970) instead of specifying the
+ number of seconds.
+ A timestamp in the past will delete the key immediately. After the timeout has expired, the key will automatically be
+ deleted.
+ If `key` already has an existing expire set, the time to live is updated to the new value.
+ The timeout will only be cleared by commands that delete or overwrite the contents of `key`.
+ See https://valkey.io/commands/expireat/ for more details.
+
+ Args:
+ key (TEncodable): The key to set a timeout on.
+ unix_seconds (int): The timeout in an absolute Unix timestamp.
+ option (Optional[ExpireOptions]): The expire option.
+
+ Returns:
+ bool: 'True' if the timeout was set, 'False' if the timeout was not set (e.g., the key doesn't exist or the operation is
+ skipped due to the provided arguments).
+
+ Examples:
+ >>> client.expireAt("my_key", 1672531200, ExpireOptions.HasNoExpiry)
+ True
+ """
+ args = (
+ [key, str(unix_seconds)]
+ if option is None
+ else [key, str(unix_seconds), option.value]
+ )
+ return cast(bool, self._execute_command(RequestType.ExpireAt, args))
+
+ def pexpire(
+ self,
+ key: TEncodable,
+ milliseconds: int,
+ option: Optional[ExpireOptions] = None,
+ ) -> bool:
+ """
+ Sets a timeout on `key` in milliseconds. After the timeout has expired, the key will automatically be deleted.
+ If `key` already has an existing expire set, the time to live is updated to the new value.
+ If `milliseconds` is a non-positive number, the key will be deleted rather than expired.
+ The timeout will only be cleared by commands that delete or overwrite the contents of `key`.
+ See https://valkey.io/commands/pexpire/ for more details.
+
+ Args:
+ key (TEncodable): The key to set a timeout on.
+ milliseconds (int): The timeout in milliseconds.
+ option (Optional[ExpireOptions]): The expire option.
+
+ Returns:
+ bool: 'True' if the timeout was set, 'False' if the timeout was not set (e.g., the key doesn't exist or the operation is
+ skipped due to the provided arguments).
+
+ Examples:
+ >>> client.pexpire("my_key", 60000, ExpireOptions.HasNoExpiry)
+ True # Indicates that a timeout of 60,000 milliseconds has been set for "my_key."
+ """
+ args = (
+ [key, str(milliseconds)]
+ if option is None
+ else [key, str(milliseconds), option.value]
+ )
+ return cast(bool, self._execute_command(RequestType.PExpire, args))
+
+ def pexpireat(
+ self,
+ key: TEncodable,
+ unix_milliseconds: int,
+ option: Optional[ExpireOptions] = None,
+ ) -> bool:
+ """
+ Sets a timeout on `key` using an absolute Unix timestamp in milliseconds (milliseconds since January 1, 1970) instead
+ of specifying the number of milliseconds.
+ A timestamp in the past will delete the key immediately. After the timeout has expired, the key will automatically be
+ deleted.
+ If `key` already has an existing expire set, the time to live is updated to the new value.
+ The timeout will only be cleared by commands that delete or overwrite the contents of `key`.
+ See https://valkey.io/commands/pexpireat/ for more details.
+
+ Args:
+ key (TEncodable): The key to set a timeout on.
+ unix_milliseconds (int): The timeout in an absolute Unix timestamp in milliseconds.
+ option (Optional[ExpireOptions]): The expire option.
+
+ Returns:
+ bool: 'True' if the timeout was set, 'False' if the timeout was not set (e.g., the key doesn't exist or the operation is
+ skipped due to the provided arguments).
+
+ Examples:
+ >>> client.pexpireAt("my_key", 1672531200000, ExpireOptions.HasNoExpiry)
+ True
+ """
+ args = (
+ [key, str(unix_milliseconds)]
+ if option is None
+ else [key, str(unix_milliseconds), option.value]
+ )
+ return cast(bool, self._execute_command(RequestType.PExpireAt, args))
+
+ def expiretime(self, key: TEncodable) -> int:
+ """
+ Returns the absolute Unix timestamp (since January 1, 1970) at which
+ the given `key` will expire, in seconds.
+ To get the expiration with millisecond precision, use `pexpiretime`.
+
+ See https://valkey.io/commands/expiretime/ for details.
+
+ Args:
+ key (TEncodable): The `key` to determine the expiration value of.
+
+ Returns:
+ int: The expiration Unix timestamp in seconds, -2 if `key` does not exist or -1 if `key` exists but has no associated expire.
+
+ Examples:
+ >>> client.expiretime("my_key")
+ -2 # 'my_key' doesn't exist.
+ >>> client.set("my_key", "value")
+ >>> client.expiretime("my_key")
+ -1 # 'my_key' has no associate expiration.
+ >>> client.expire("my_key", 60)
+ >>> client.expiretime("my_key")
+ 1718614954
+
+ Since: Valkey version 7.0.0.
+ """
+ return cast(int, self._execute_command(RequestType.ExpireTime, [key]))
+
+ def pexpiretime(self, key: TEncodable) -> int:
+ """
+ Returns the absolute Unix timestamp (since January 1, 1970) at which
+ the given `key` will expire, in milliseconds.
+
+ See https://valkey.io/commands/pexpiretime/ for details.
+
+ Args:
+ key (TEncodable): The `key` to determine the expiration value of.
+
+ Returns:
+ int: The expiration Unix timestamp in milliseconds, -2 if `key` does not exist, or -1 if `key` exists but has no associated expiration.
+
+ Examples:
+ >>> client.pexpiretime("my_key")
+ -2 # 'my_key' doesn't exist.
+ >>> client.set("my_key", "value")
+ >>> client.pexpiretime("my_key")
+ -1 # 'my_key' has no associate expiration.
+ >>> client.expire("my_key", 60)
+ >>> client.pexpiretime("my_key")
+ 1718615446670
+
+ Since: Valkey version 7.0.0.
+ """
+ return cast(int, self._execute_command(RequestType.PExpireTime, [key]))
+
+ def ttl(self, key: TEncodable) -> int:
+ """
+ Returns the remaining time to live of `key` that has a timeout.
+ See https://valkey.io/commands/ttl/ for more details.
+
+ Args:
+ key (TEncodable): The key to return its timeout.
+
+ Returns:
+ int: TTL in seconds, -2 if `key` does not exist or -1 if `key` exists but has no associated expire.
+
+ Examples:
+ >>> client.ttl("my_key")
+ 3600 # Indicates that "my_key" has a remaining time to live of 3600 seconds.
+ >>> client.ttl("nonexistent_key")
+ -2 # Returns -2 for a non-existing key.
+ >>> client.ttl("key")
+ -1 # Indicates that "key: has no has no associated expire.
+ """
+ return cast(int, self._execute_command(RequestType.TTL, [key]))
+
+ def pttl(
+ self,
+ key: TEncodable,
+ ) -> int:
+ """
+ Returns the remaining time to live of `key` that has a timeout, in milliseconds.
+ See https://valkey.io/commands/pttl for more details.
+
+ Args:
+ key (TEncodable): The key to return its timeout.
+
+ Returns:
+ int: TTL in milliseconds. -2 if `key` does not exist, -1 if `key` exists but has no associated expire.
+
+ Examples:
+ >>> client.pttl("my_key")
+ 5000 # Indicates that the key "my_key" has a remaining time to live of 5000 milliseconds.
+ >>> client.pttl("non_existing_key")
+ -2 # Indicates that the key "non_existing_key" does not exist.
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.PTTL, [key]),
+ )
+
+ def persist(
+ self,
+ key: TEncodable,
+ ) -> bool:
+ """
+ Remove the existing timeout on `key`, turning the key from volatile (a key with an expire set) to
+ persistent (a key that will never expire as no timeout is associated).
+
+ See https://valkey.io/commands/persist/ for more details.
+
+ Args:
+ key (TEncodable): The key to remove the existing timeout on.
+
+ Returns:
+ bool: False if `key` does not exist or does not have an associated timeout, True if the timeout has been removed.
+
+ Examples:
+ >>> client.persist("my_key")
+ True # Indicates that the timeout associated with the key "my_key" was successfully removed.
+ """
+ return cast(
+ bool,
+ self._execute_command(RequestType.Persist, [key]),
+ )
+
+ def type(self, key: TEncodable) -> bytes:
+ """
+ Returns the bytes string representation of the type of the value stored at `key`.
+
+ See https://valkey.io/commands/type/ for more details.
+
+ Args:
+ key (TEncodable): The key to check its data type.
+
+ Returns:
+ bytes: If the key exists, the type of the stored value is returned.
+ Otherwise, a b"none" bytes string is returned.
+
+ Examples:
+ >>> client.set("key", "value")
+ >>> client.type("key")
+ b'string'
+ >>> client.lpush("key", ["value"])
+ >>> client.type("key")
+ b'list'
+ """
+ return cast(bytes, self._execute_command(RequestType.Type, [key]))
+
+ def xadd(
+ self,
+ key: TEncodable,
+ values: List[Tuple[TEncodable, TEncodable]],
+ options: Optional[StreamAddOptions] = None,
+ ) -> Optional[bytes]:
+ """
+ Adds an entry to the specified stream stored at `key`. If the `key` doesn't exist, the stream is created.
+
+ See https://valkey.io/commands/xadd for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ values (List[Tuple[TEncodable, TEncodable]]): Field-value pairs to be added to the entry.
+ options (Optional[StreamAddOptions]): Additional options for adding entries to the stream. Default to None. See `StreamAddOptions`.
+
+ Returns:
+ bytes: The id of the added entry, or None if `options.make_stream` is set to False and no stream with the matching `key` exists.
+
+ Example:
+ >>> client.xadd("mystream", [("field", "value"), ("field2", "value2")])
+ b"1615957011958-0" # Example stream entry ID.
+ >>> client.xadd("non_existing_stream", [(field, "foo1"), (field2, "bar1")], StreamAddOptions(id="0-1", make_stream=False))
+ None # The key doesn't exist, therefore, None is returned.
+ >>> client.xadd("non_existing_stream", [(field, "foo1"), (field2, "bar1")], StreamAddOptions(id="0-1"))
+ b"0-1" # Returns the stream id.
+ """
+ args: List[TEncodable] = [key]
+ if options:
+ args.extend(options.to_args())
+ else:
+ args.append("*")
+ args.extend([field for pair in values for field in pair])
+
+ return cast(
+ Optional[bytes], self._execute_command(RequestType.XAdd, args)
+ )
+
+ def xdel(self, key: TEncodable, ids: List[TEncodable]) -> int:
+ """
+ Removes the specified entries by id from a stream, and returns the number of entries deleted.
+
+ See https://valkey.io/commands/xdel for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ ids (List[TEncodable]): An array of entry ids.
+
+ Returns:
+ int: The number of entries removed from the stream. This number may be less than the number of entries in
+ `ids`, if the specified `ids` don't exist in the stream.
+
+ Examples:
+ >>> client.xdel("key", ["1538561698944-0", "1538561698944-1"])
+ 2 # Stream marked 2 entries as deleted.
+ """
+ args: List[TEncodable] = [key]
+ args.extend(ids)
+ return cast(
+ int,
+ self._execute_command(RequestType.XDel, [key] + ids),
+ )
+
+ def xtrim(
+ self,
+ key: TEncodable,
+ options: StreamTrimOptions,
+ ) -> int:
+ """
+ Trims the stream stored at `key` by evicting older entries.
+
+ See https://valkey.io/commands/xtrim for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ options (StreamTrimOptions): Options detailing how to trim the stream. See `StreamTrimOptions`.
+
+ Returns:
+ int: TThe number of entries deleted from the stream. If `key` doesn't exist, 0 is returned.
+
+ Example:
+ >>> client.xadd("mystream", [("field", "value"), ("field2", "value2")], StreamAddOptions(id="0-1"))
+ >>> client.xtrim("mystream", TrimByMinId(exact=True, threshold="0-2")))
+ 1 # One entry was deleted from the stream.
+ """
+ args = [key]
+ if options:
+ args.extend(options.to_args())
+
+ return cast(int, self._execute_command(RequestType.XTrim, args))
+
+ def xlen(self, key: TEncodable) -> int:
+ """
+ Returns the number of entries in the stream stored at `key`.
+
+ See https://valkey.io/commands/xlen for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+
+ Returns:
+ int: The number of entries in the stream. If `key` does not exist, returns 0.
+
+ Examples:
+ >>> client.xadd("mystream", [("field", "value")])
+ >>> client.xadd("mystream", [("field2", "value2")])
+ >>> client.xlen("mystream")
+ 2 # There are 2 entries in "mystream".
+ """
+ return cast(
+ int,
+ self._execute_command(RequestType.XLen, [key]),
+ )
+
+ def xrange(
+ self,
+ key: TEncodable,
+ start: StreamRangeBound,
+ end: StreamRangeBound,
+ count: Optional[int] = None,
+ ) -> Optional[Mapping[bytes, List[List[bytes]]]]:
+ """
+ Returns stream entries matching a given range of IDs.
+
+ See https://valkey.io/commands/xrange for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ start (StreamRangeBound): The starting stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MinId` to start with the minimum available ID.
+ end (StreamRangeBound): The ending stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MaxId` to end with the maximum available ID.
+ count (Optional[int]): An optional argument specifying the maximum count of stream entries to return.
+ If `count` is not provided, all stream entries in the range will be returned.
+
+ Returns:
+ Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream IDs to stream entry data, where entry data is a
+ list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range
+ arguments are not applicable.
+
+ Examples:
+ >>> client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1"))
+ >>> client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2"))
+ >>> client.xrange("mystream", MinId(), MaxId())
+ {
+ b"0-1": [[b"field1", b"value1"]],
+ b"0-2": [[b"field2", b"value2"], [b"field2", b"value3"]],
+ } # Indicates the stream IDs and their associated field-value pairs for all stream entries in "mystream".
+ """
+ args: List[TEncodable] = [key, start.to_arg(), end.to_arg()]
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ return cast(
+ Optional[Mapping[bytes, List[List[bytes]]]],
+ self._execute_command(RequestType.XRange, args),
+ )
+
+ def xrevrange(
+ self,
+ key: TEncodable,
+ end: StreamRangeBound,
+ start: StreamRangeBound,
+ count: Optional[int] = None,
+ ) -> Optional[Mapping[bytes, List[List[bytes]]]]:
+ """
+ Returns stream entries matching a given range of IDs in reverse order. Equivalent to `XRANGE` but returns the
+ entries in reverse order.
+
+ See https://valkey.io/commands/xrevrange for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ end (StreamRangeBound): The ending stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MaxId` to end with the maximum available ID.
+ start (StreamRangeBound): The starting stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MinId` to start with the minimum available ID.
+ count (Optional[int]): An optional argument specifying the maximum count of stream entries to return.
+ If `count` is not provided, all stream entries in the range will be returned.
+
+ Returns:
+ Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream IDs to stream entry data, where entry data is a
+ list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range
+ arguments are not applicable.
+
+ Examples:
+ >>> client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1"))
+ >>> client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2"))
+ >>> client.xrevrange("mystream", MaxId(), MinId())
+ {
+ "0-2": [["field2", "value2"], ["field2", "value3"]],
+ "0-1": [["field1", "value1"]],
+ } # Indicates the stream IDs and their associated field-value pairs for all stream entries in "mystream".
+ """
+ args: List[TEncodable] = [key, end.to_arg(), start.to_arg()]
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ return cast(
+ Optional[Mapping[bytes, List[List[bytes]]]],
+ self._execute_command(RequestType.XRevRange, args),
+ )
+
+ def xread(
+ self,
+ keys_and_ids: Mapping[TEncodable, TEncodable],
+ options: Optional[StreamReadOptions] = None,
+ ) -> Optional[Mapping[bytes, Mapping[bytes, List[List[bytes]]]]]:
+ """
+ Reads entries from the given streams.
+
+ See https://valkey.io/commands/xread for more details.
+
+ Note:
+ When in cluster mode, all keys in `keys_and_ids` must map to the same hash slot.
+
+ Args:
+ keys_and_ids (Mapping[TEncodable, TEncodable]): A mapping of keys and entry IDs to read from.
+ options (Optional[StreamReadOptions]): Options detailing how to read the stream.
+
+ Returns:
+ Optional[Mapping[bytes, Mapping[bytes, List[List[bytes]]]]]: A mapping of stream keys, to a mapping of stream IDs,
+ to a list of pairings with format `[[field, entry], [field, entry], ...]`.
+ None will be returned under the following conditions:
+ - All key-ID pairs in `keys_and_ids` have either a non-existing key or a non-existing ID, or there are no entries after the given ID.
+ - The `BLOCK` option is specified and the timeout is hit.
+
+ Examples:
+ >>> client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1"))
+ >>> client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2"))
+ >>> client.xread({"mystream": "0-0"}, StreamReadOptions(block_ms=1000))
+ {
+ b"mystream": {
+ b"0-1": [[b"field1", b"value1"]],
+ b"0-2": [[b"field2", b"value2"], [b"field2", b"value3"]],
+ }
+ }
+ # Indicates the stream entries for "my_stream" with IDs greater than "0-0". The operation blocks up to
+ # 1000ms if there is no stream data.
+ """
+ args: List[TEncodable] = [] if options is None else options.to_args()
+ args.append("STREAMS")
+ args.extend([key for key in keys_and_ids.keys()])
+ args.extend([value for value in keys_and_ids.values()])
+
+ return cast(
+ Optional[Mapping[bytes, Mapping[bytes, List[List[bytes]]]]],
+ self._execute_command(RequestType.XRead, args),
+ )
+
+ def xgroup_create(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ group_id: TEncodable,
+ options: Optional[StreamGroupOptions] = None,
+ ) -> TOK:
+ """
+ Creates a new consumer group uniquely identified by `group_name` for the stream stored at `key`.
+
+ See https://valkey.io/commands/xgroup-create for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The newly created consumer group name.
+ group_id (TEncodable): The stream entry ID that specifies the last delivered entry in the stream from the new
+ group’s perspective. The special ID "$" can be used to specify the last entry in the stream.
+ options (Optional[StreamGroupOptions]): Options for creating the stream group.
+
+ Returns:
+ TOK: A simple "OK" response.
+
+ Examples:
+ >>> client.xgroup_create("mystream", "mygroup", "$", StreamGroupOptions(make_stream=True))
+ OK
+ # Created the consumer group "mygroup" for the stream "mystream", which will track entries created after
+ # the most recent entry. The stream was created with length 0 if it did not already exist.
+ """
+ args: List[TEncodable] = [key, group_name, group_id]
+ if options is not None:
+ args.extend(options.to_args())
+
+ return cast(
+ TOK,
+ self._execute_command(RequestType.XGroupCreate, args),
+ )
+
+ def xgroup_destroy(self, key: TEncodable, group_name: TEncodable) -> bool:
+ """
+ Destroys the consumer group `group_name` for the stream stored at `key`.
+
+ See https://valkey.io/commands/xgroup-destroy for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name to delete.
+
+ Returns:
+ bool: True if the consumer group was destroyed. Otherwise, returns False.
+
+ Examples:
+ >>> client.xgroup_destroy("mystream", "mygroup")
+ True # The consumer group "mygroup" for stream "mystream" was destroyed.
+ """
+ return cast(
+ bool,
+ self._execute_command(RequestType.XGroupDestroy, [key, group_name]),
+ )
+
+ def xgroup_create_consumer(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ consumer_name: TEncodable,
+ ) -> bool:
+ """
+ Creates a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`.
+
+ See https://valkey.io/commands/xgroup-createconsumer for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ consumer_name (TEncodable): The newly created consumer.
+
+ Returns:
+ bool: True if the consumer is created. Otherwise, returns False.
+
+ Examples:
+ >>> client.xgroup_create_consumer("mystream", "mygroup", "myconsumer")
+ True # The consumer "myconsumer" was created in consumer group "mygroup" for the stream "mystream".
+ """
+ return cast(
+ bool,
+ self._execute_command(
+ RequestType.XGroupCreateConsumer, [key, group_name, consumer_name]
+ ),
+ )
+
+ def xgroup_del_consumer(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ consumer_name: TEncodable,
+ ) -> int:
+ """
+ Deletes a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`.
+
+ See https://valkey.io/commands/xgroup-delconsumer for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ consumer_name (TEncodable): The consumer to delete.
+
+ Returns:
+ int: The number of pending messages the `consumer` had before it was deleted.
+
+ Examples:
+ >>> client.xgroup_del_consumer("mystream", "mygroup", "myconsumer")
+ 5 # Consumer "myconsumer" was deleted, and had 5 pending messages unclaimed.
+ """
+ return cast(
+ int,
+ self._execute_command(
+ RequestType.XGroupDelConsumer, [key, group_name, consumer_name]
+ ),
+ )
+
+ def xgroup_set_id(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ stream_id: TEncodable,
+ entries_read: Optional[int] = None,
+ ) -> TOK:
+ """
+ Set the last delivered ID for a consumer group.
+
+ See https://valkey.io/commands/xgroup-setid for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ stream_id (TEncodable): The stream entry ID that should be set as the last delivered ID for the consumer group.
+ entries_read: (Optional[int]): A value representing the number of stream entries already read by the
+ group. This option can only be specified if you are using Valkey version 7.0.0 or above.
+
+ Returns:
+ TOK: A simple "OK" response.
+
+ Examples:
+ >>> client.xgroup_set_id("mystream", "mygroup", "0")
+ OK # The last delivered ID for consumer group "mygroup" was set to 0.
+ """
+ args: List[TEncodable] = [key, group_name, stream_id]
+ if entries_read is not None:
+ args.extend(["ENTRIESREAD", str(entries_read)])
+
+ return cast(
+ TOK,
+ self._execute_command(RequestType.XGroupSetId, args),
+ )
+
+ def xreadgroup(
+ self,
+ keys_and_ids: Mapping[TEncodable, TEncodable],
+ group_name: TEncodable,
+ consumer_name: TEncodable,
+ options: Optional[StreamReadGroupOptions] = None,
+ ) -> Optional[Mapping[bytes, Mapping[bytes, Optional[List[List[bytes]]]]]]:
+ """
+ Reads entries from the given streams owned by a consumer group.
+
+ See https://valkey.io/commands/xreadgroup for more details.
+
+ Note:
+ When in cluster mode, all keys in `keys_and_ids` must map to the same hash slot.
+
+ Args:
+ keys_and_ids (Mapping[TEncodable, TEncodable]): A mapping of stream keys to stream entry IDs to read from.
+ Use the special entry ID of `">"` to receive only new messages.
+ group_name (TEncodable): The consumer group name.
+ consumer_name (TEncodable): The consumer name. The consumer will be auto-created if it does not already exist.
+ options (Optional[StreamReadGroupOptions]): Options detailing how to read the stream.
+
+ Returns:
+ Optional[Mapping[bytes, Mapping[bytes, Optional[List[List[bytes]]]]]]: A mapping of stream keys, to a mapping of
+ stream IDs, to a list of pairings with format `[[field, entry], [field, entry], ...]`.
+ Returns None if the BLOCK option is given and a timeout occurs, or if there is no stream that can be served.
+
+ Examples:
+ >>> client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="1-0"))
+ >>> client.xgroup_create("mystream", "mygroup", "0-0")
+ >>> client.xreadgroup({"mystream": ">"}, "mygroup", "myconsumer", StreamReadGroupOptions(count=1))
+ {
+ b"mystream": {
+ b"1-0": [[b"field1", b"value1"]],
+ }
+ } # Read one stream entry from "mystream" using "myconsumer" in the consumer group "mygroup".
+ """
+ args: List[TEncodable] = ["GROUP", group_name, consumer_name]
+ if options is not None:
+ args.extend(options.to_args())
+
+ args.append("STREAMS")
+ args.extend([key for key in keys_and_ids.keys()])
+ args.extend([value for value in keys_and_ids.values()])
+
+ return cast(
+ Optional[Mapping[bytes, Mapping[bytes, Optional[List[List[bytes]]]]]],
+ self._execute_command(RequestType.XReadGroup, args),
+ )
+
+ def xack(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ ids: List[TEncodable],
+ ) -> int:
+ """
+ Removes one or multiple messages from the Pending Entries List (PEL) of a stream consumer group.
+ This command should be called on pending messages so that such messages do not get processed again by the
+ consumer group.
+
+ See https://valkey.io/commands/xack for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ ids (List[TEncodable]): The stream entry IDs to acknowledge and consume for the given consumer group.
+
+ Returns:
+ int: The number of messages that were successfully acknowledged.
+
+ Examples:
+ >>> client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="1-0"))
+ >>> client.xgroup_create("mystream", "mygroup", "0-0")
+ >>> client.xreadgroup({"mystream": ">"}, "mygroup", "myconsumer")
+ {
+ "mystream": {
+ "1-0": [["field1", "value1"]],
+ }
+ } # Read one stream entry, the entry is now in the Pending Entries List for "mygroup".
+ >>> client.xack("mystream", "mygroup", ["1-0"])
+ 1 # 1 pending message was acknowledged and removed from the Pending Entries List for "mygroup".
+ """
+ args: List[TEncodable] = [key, group_name]
+ args.extend(ids)
+ return cast(
+ int,
+ self._execute_command(RequestType.XAck, [key, group_name] + ids),
+ )
+
+ def xpending(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ ) -> List[Union[int, bytes, List[List[bytes]], None]]:
+ """
+ Returns stream message summary information for pending messages for the given consumer group.
+
+ See https://valkey.io/commands/xpending for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+
+ Returns:
+ List[Union[int, bytes, List[List[bytes]], None]]: A list that includes the summary of pending messages, with the
+ format `[num_group_messages, start_id, end_id, [[consumer_name, num_consumer_messages]]]`, where:
+ - `num_group_messages`: The total number of pending messages for this consumer group.
+ - `start_id`: The smallest ID among the pending messages.
+ - `end_id`: The greatest ID among the pending messages.
+ - `[[consumer_name, num_consumer_messages]]`: A 2D list of every consumer in the consumer group with at
+ least one pending message, and the number of pending messages it has.
+
+ If there are no pending messages for the given consumer group, `[0, None, None, None]` will be returned.
+
+ Examples:
+ >>> client.xpending("my_stream", "my_group")
+ [4, "1-0", "1-3", [["my_consumer1", "3"], ["my_consumer2", "1"]]
+ """
+ return cast(
+ List[Union[int, bytes, List[List[bytes]], None]],
+ self._execute_command(RequestType.XPending, [key, group_name]),
+ )
+
+ def xpending_range(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ start: StreamRangeBound,
+ end: StreamRangeBound,
+ count: int,
+ options: Optional[StreamPendingOptions] = None,
+ ) -> List[List[Union[bytes, int]]]:
+ """
+ Returns an extended form of stream message information for pending messages matching a given range of IDs.
+
+ See https://valkey.io/commands/xpending for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ start (StreamRangeBound): The starting stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MinId` to start with the minimum available ID.
+ end (StreamRangeBound): The ending stream ID bound for the range.
+ - Use `IdBound` to specify a stream ID.
+ - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `MaxId` to end with the maximum available ID.
+ count (int): Limits the number of messages returned.
+ options (Optional[StreamPendingOptions]): The stream pending options.
+
+ Returns:
+ List[List[Union[bytes, int]]]: A list of lists, where each inner list is a length 4 list containing extended
+ message information with the format `[[id, consumer_name, time_elapsed, num_delivered]]`, where:
+ - `id`: The ID of the message.
+ - `consumer_name`: The name of the consumer that fetched the message and has still to acknowledge it. We
+ call it the current owner of the message.
+ - `time_elapsed`: The number of milliseconds that elapsed since the last time this message was delivered
+ to this consumer.
+ - `num_delivered`: The number of times this message was delivered.
+
+ Examples:
+ >>> client.xpending_range("my_stream", "my_group", MinId(), MaxId(), 10, StreamPendingOptions(consumer_name="my_consumer"))
+ [[b"1-0", b"my_consumer", 1234, 1], [b"1-1", b"my_consumer", 1123, 1]]
+ # Extended stream entry information for the pending entries associated with "my_consumer".
+ """
+ args = _create_xpending_range_args(key, group_name, start, end, count, options)
+ return cast(
+ List[List[Union[bytes, int]]],
+ self._execute_command(RequestType.XPending, args),
+ )
+
+ def xclaim(
+ self,
+ key: TEncodable,
+ group: TEncodable,
+ consumer: TEncodable,
+ min_idle_time_ms: int,
+ ids: List[TEncodable],
+ options: Optional[StreamClaimOptions] = None,
+ ) -> Mapping[bytes, List[List[bytes]]]:
+ """
+ Changes the ownership of a pending message.
+
+ See https://valkey.io/commands/xclaim for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group (TEncodable): The consumer group name.
+ consumer (TEncodable): The group consumer.
+ min_idle_time_ms (int): The minimum idle time for the message to be claimed.
+ ids (List[TEncodable]): A array of entry ids.
+ options (Optional[StreamClaimOptions]): Stream claim options.
+
+ Returns:
+ Mapping[bytes, List[List[bytes]]]: A Mapping of message entries with the format
+ {"entryId": [["entry", "data"], ...], ...} that are claimed by the consumer.
+
+ Examples:
+ # read messages from streamId for consumer1
+ >>> client.xreadgroup({"mystream": ">"}, "mygroup", "consumer1")
+ {
+ b"mystream": {
+ b"1-0": [[b"field1", b"value1"]],
+ }
+ }
+ # "1-0" is now read, and we can assign the pending messages to consumer2
+ >>> client.xclaim("mystream", "mygroup", "consumer2", 0, ["1-0"])
+ {b"1-0": [[b"field1", b"value1"]]}
+ """
+
+ args = [key, group, consumer, str(min_idle_time_ms), *ids]
+
+ if options:
+ args.extend(options.to_args())
+
+ return cast(
+ Mapping[bytes, List[List[bytes]]],
+ self._execute_command(RequestType.XClaim, args),
+ )
+
+ def xclaim_just_id(
+ self,
+ key: TEncodable,
+ group: TEncodable,
+ consumer: TEncodable,
+ min_idle_time_ms: int,
+ ids: List[TEncodable],
+ options: Optional[StreamClaimOptions] = None,
+ ) -> List[bytes]:
+ """
+ Changes the ownership of a pending message. This function returns a List with
+ only the message/entry IDs, and is equivalent to using JUSTID in the Valkey API.
+
+ See https://valkey.io/commands/xclaim for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group (TEncodable): The consumer group name.
+ consumer (TEncodable): The group consumer.
+ min_idle_time_ms (int): The minimum idle time for the message to be claimed.
+ ids (List[TEncodable]): A array of entry ids.
+ options (Optional[StreamClaimOptions]): Stream claim options.
+
+ Returns:
+ List[bytes]: A List of message ids claimed by the consumer.
+
+ Examples:
+ # read messages from streamId for consumer1
+ >>> client.xreadgroup({"mystream": ">"}, "mygroup", "consumer1")
+ {
+ b"mystream": {
+ b"1-0": [[b"field1", b"value1"]],
+ }
+ }
+ # "1-0" is now read, and we can assign the pending messages to consumer2
+ >>> client.xclaim_just_id("mystream", "mygroup", "consumer2", 0, ["1-0"])
+ [b"1-0"]
+ """
+
+ args = [
+ key,
+ group,
+ consumer,
+ str(min_idle_time_ms),
+ *ids,
+ StreamClaimOptions.JUST_ID_VALKEY_API,
+ ]
+
+ if options:
+ args.extend(options.to_args())
+
+ return cast(
+ List[bytes],
+ self._execute_command(RequestType.XClaim, args),
+ )
+
+ def xautoclaim(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ consumer_name: TEncodable,
+ min_idle_time_ms: int,
+ start: TEncodable,
+ count: Optional[int] = None,
+ ) -> List[Union[bytes, Mapping[bytes, List[List[bytes]]], List[bytes]]]:
+ """
+ Transfers ownership of pending stream entries that match the specified criteria.
+
+ See https://valkey.io/commands/xautoclaim for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ consumer_name (TEncodable): The consumer name.
+ min_idle_time_ms (int): Filters the claimed entries to those that have been idle for more than the specified
+ value.
+ start (TEncodable): Filters the claimed entries to those that have an ID equal or greater than the specified value.
+ count (Optional[int]): Limits the number of claimed entries to the specified value.
+
+ Returns:
+ List[Union[bytes, Mapping[bytes, List[List[bytes]]], List[bytes]]]: A list containing the following elements:
+ - A stream ID to be used as the start argument for the next call to `XAUTOCLAIM`. This ID is equivalent
+ to the next ID in the stream after the entries that were scanned, or "0-0" if the entire stream was
+ scanned.
+ - A mapping of the claimed entries, with the keys being the claimed entry IDs and the values being a
+ 2D list of the field-value pairs in the format `[[field1, value1], [field2, value2], ...]`.
+ - If you are using Valkey 7.0.0 or above, the response list will also include a list containing the
+ message IDs that were in the Pending Entries List but no longer exist in the stream. These IDs are
+ deleted from the Pending Entries List.
+
+ Examples:
+ # Valkey version < 7.0.0:
+ >>> client.xautoclaim("my_stream", "my_group", "my_consumer", 3_600_000, "0-0")
+ [
+ b"0-0",
+ {
+ b"1-1": [
+ [b"field1", b"value1"],
+ [b"field2", b"value2"],
+ ]
+ }
+ ]
+ # Stream entry "1-1" was idle for over an hour and was thus claimed by "my_consumer". The entire stream
+ # was scanned.
+
+ # Valkey version 7.0.0 and above:
+ >>> client.xautoclaim("my_stream", "my_group", "my_consumer", 3_600_000, "0-0")
+ [
+ b"0-0",
+ {
+ b"1-1": [
+ [b"field1", b"value1"],
+ [b"field2", b"value2"],
+ ]
+ },
+ [b"1-2"]
+ ]
+ # Stream entry "1-1" was idle for over an hour and was thus claimed by "my_consumer". The entire stream
+ # was scanned. Additionally, entry "1-2" was removed from the Pending Entries List because it no longer
+ # exists in the stream.
+
+ Since: Valkey version 6.2.0.
+ """
+ args: List[TEncodable] = [
+ key,
+ group_name,
+ consumer_name,
+ str(min_idle_time_ms),
+ start,
+ ]
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ return cast(
+ List[Union[bytes, Mapping[bytes, List[List[bytes]]], List[bytes]]],
+ self._execute_command(RequestType.XAutoClaim, args),
+ )
+
+ def xautoclaim_just_id(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ consumer_name: TEncodable,
+ min_idle_time_ms: int,
+ start: TEncodable,
+ count: Optional[int] = None,
+ ) -> List[Union[bytes, List[bytes]]]:
+ """
+ Transfers ownership of pending stream entries that match the specified criteria. This command uses the JUSTID
+ argument to further specify that the return value should contain a list of claimed IDs without their
+ field-value info.
+
+ See https://valkey.io/commands/xautoclaim for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+ consumer_name (TEncodable): The consumer name.
+ min_idle_time_ms (int): Filters the claimed entries to those that have been idle for more than the specified
+ value.
+ start (TEncodable): Filters the claimed entries to those that have an ID equal or greater than the specified value.
+ count (Optional[int]): Limits the number of claimed entries to the specified value.
+
+ Returns:
+ List[Union[bytes, List[bytes]]]: A list containing the following elements:
+ - A stream ID to be used as the start argument for the next call to `XAUTOCLAIM`. This ID is equivalent
+ to the next ID in the stream after the entries that were scanned, or "0-0" if the entire stream was
+ scanned.
+ - A list of the IDs for the claimed entries.
+ - If you are using Valkey 7.0.0 or above, the response list will also include a list containing the
+ message IDs that were in the Pending Entries List but no longer exist in the stream. These IDs are
+ deleted from the Pending Entries List.
+
+ Examples:
+ # Valkey version < 7.0.0:
+ >>> client.xautoclaim_just_id("my_stream", "my_group", "my_consumer", 3_600_000, "0-0")
+ [b"0-0", [b"1-1"]]
+ # Stream entry "1-1" was idle for over an hour and was thus claimed by "my_consumer". The entire stream
+ # was scanned.
+
+ # Valkey version 7.0.0 and above:
+ >>> client.xautoclaim_just_id("my_stream", "my_group", "my_consumer", 3_600_000, "0-0")
+ [b"0-0", [b"1-1"], [b"1-2"]]
+ # Stream entry "1-1" was idle for over an hour and was thus claimed by "my_consumer". The entire stream
+ # was scanned. Additionally, entry "1-2" was removed from the Pending Entries List because it no longer
+ # exists in the stream.
+
+ Since: Valkey version 6.2.0.
+ """
+ args: List[TEncodable] = [
+ key,
+ group_name,
+ consumer_name,
+ str(min_idle_time_ms),
+ start,
+ ]
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ args.append("JUSTID")
+
+ return cast(
+ List[Union[bytes, List[bytes]]],
+ self._execute_command(RequestType.XAutoClaim, args),
+ )
+
+ def xinfo_groups(
+ self,
+ key: TEncodable,
+ ) -> List[Mapping[bytes, Union[bytes, int, None]]]:
+ """
+ Returns the list of all consumer groups and their attributes for the stream stored at `key`.
+
+ See https://valkey.io/commands/xinfo-groups for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+
+ Returns:
+ List[Mapping[bytes, Union[bytes, int, None]]]: A list of mappings, where each mapping represents the
+ attributes of a consumer group for the stream at `key`.
+
+ Examples:
+ >>> client.xinfo_groups("my_stream")
+ [
+ {
+ b"name": b"mygroup",
+ b"consumers": 2,
+ b"pending": 2,
+ b"last-delivered-id": b"1638126030001-0",
+ b"entries-read": 2, # The "entries-read" field was added in Valkey version 7.0.0.
+ b"lag": 0, # The "lag" field was added in Valkey version 7.0.0.
+ },
+ {
+ b"name": b"some-other-group",
+ b"consumers": 1,
+ b"pending": 0,
+ b"last-delivered-id": b"1638126028070-0",
+ b"entries-read": 1,
+ b"lag": 1,
+ }
+ ]
+ # The list of consumer groups and their attributes for stream "my_stream".
+ """
+ return cast(
+ List[Mapping[bytes, Union[bytes, int, None]]],
+ self._execute_command(RequestType.XInfoGroups, [key]),
+ )
+
+ def xinfo_consumers(
+ self,
+ key: TEncodable,
+ group_name: TEncodable,
+ ) -> List[Mapping[bytes, Union[bytes, int]]]:
+ """
+ Returns the list of all consumers and their attributes for the given consumer group of the stream stored at
+ `key`.
+
+ See https://valkey.io/commands/xinfo-consumers for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ group_name (TEncodable): The consumer group name.
+
+ Returns:
+ List[Mapping[bytes, Union[bytes, int]]]: A list of mappings, where each mapping contains the attributes of a
+ consumer for the given consumer group of the stream at `key`.
+
+ Examples:
+ >>> client.xinfo_consumers("my_stream", "my_group")
+ [
+ {
+ b"name": b"Alice",
+ b"pending": 1,
+ b"idle": 9104628,
+ b"inactive": 18104698, # The "inactive" field was added in Valkey version 7.2.0.
+ },
+ {
+ b"name": b"Bob",
+ b"pending": 1,
+ b"idle": 83841983,
+ b"inactive": 993841998,
+ }
+ ]
+ # The list of consumers and their attributes for consumer group "my_group" of stream "my_stream".
+ """
+ return cast(
+ List[Mapping[bytes, Union[bytes, int]]],
+ self._execute_command(RequestType.XInfoConsumers, [key, group_name]),
+ )
+
+ def xinfo_stream(
+ self,
+ key: TEncodable,
+ ) -> TXInfoStreamResponse:
+ """
+ Returns information about the stream stored at `key`. To get more detailed information, use `xinfo_stream_full`.
+
+ See https://valkey.io/commands/xinfo-stream for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+
+ Returns:
+ TXInfoStreamResponse: A mapping of stream information for the given `key`. See the example for a sample
+ response.
+
+ Examples:
+ >>> client.xinfo_stream("my_stream")
+ {
+ b"length": 4,
+ b"radix-tree-keys": 1L,
+ b"radix-tree-nodes": 2L,
+ b"last-generated-id": b"1719877599564-0",
+ b"max-deleted-entry-id": b"0-0", # This field was added in Valkey version 7.0.0.
+ b"entries-added": 4L, # This field was added in Valkey version 7.0.0.
+ b"recorded-first-entry-id": b"1719710679916-0", # This field was added in Valkey version 7.0.0.
+ b"groups": 1L,
+ b"first-entry": [
+ b"1719710679916-0",
+ [b"foo1", b"bar1", b"foo2", b"bar2"],
+ ],
+ b"last-entry": [
+ b"1719877599564-0",
+ [b"field1", b"value1"],
+ ],
+ }
+ # Stream information for "my_stream". Note that "first-entry" and "last-entry" could both be `None` if
+ # the stream is empty.
+ """
+ return cast(
+ TXInfoStreamResponse,
+ self._execute_command(RequestType.XInfoStream, [key]),
+ )
+
+ def xinfo_stream_full(
+ self,
+ key: TEncodable,
+ count: Optional[int] = None,
+ ) -> TXInfoStreamFullResponse:
+ """
+ Returns verbose information about the stream stored at `key`.
+
+ See https://valkey.io/commands/xinfo-stream for more details.
+
+ Args:
+ key (TEncodable): The key of the stream.
+ count (Optional[int]): The number of stream and PEL entries that are returned. A value of `0` means that all
+ entries will be returned. If not provided, defaults to `10`.
+
+ Returns:
+ TXInfoStreamFullResponse: A mapping of detailed stream information for the given `key`. See the example for
+ a sample response.
+
+ Examples:
+ >>> client.xinfo_stream_full("my_stream")
+ {
+ b"length": 4,
+ b"radix-tree-keys": 1L,
+ b"radix-tree-nodes": 2L,
+ b"last-generated-id": b"1719877599564-0",
+ b"max-deleted-entry-id": b"0-0", # This field was added in Valkey version 7.0.0.
+ b"entries-added": 4L, # This field was added in Valkey version 7.0.0.
+ b"recorded-first-entry-id": b"1719710679916-0", # This field was added in Valkey version 7.0.0.
+ b"entries": [
+ [
+ b"1719710679916-0",
+ [b"foo1", b"bar1", b"foo2", b"bar2"],
+ ],
+ [
+ b"1719877599564-0":
+ [b"field1", b"value1"],
+ ]
+ ],
+ b"groups": [
+ {
+ b"name": b"mygroup",
+ b"last-delivered-id": b"1719710688676-0",
+ b"entries-read": 2, # This field was added in Valkey version 7.0.0.
+ b"lag": 0, # This field was added in Valkey version 7.0.0.
+ b"pel-count": 2,
+ b"pending": [
+ [
+ b"1719710679916-0",
+ b"Alice",
+ 1719710707260,
+ 1,
+ ],
+ [
+ b"1719710688676-0",
+ b"Alice",
+ 1719710718373,
+ 1,
+ ],
+ ],
+ b"consumers": [
+ {
+ b"name": b"Alice",
+ b"seen-time": 1719710718373,
+ b"active-time": 1719710718373, # This field was added in Valkey version 7.2.0.
+ b"pel-count": 2,
+ b"pending": [
+ [
+ b"1719710679916-0",
+ 1719710707260,
+ 1
+ ],
+ [
+ b"1719710688676-0",
+ 1719710718373,
+ 1
+ ]
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ # Detailed stream information for "my_stream".
+
+ Since: Valkey version 6.0.0.
+ """
+ args = [key, "FULL"]
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ return cast(
+ TXInfoStreamFullResponse,
+ self._execute_command(RequestType.XInfoStream, args),
+ )
+
def geoadd(
    self,
    key: TEncodable,
    members_geospatialdata: Mapping[TEncodable, GeospatialData],
    existing_options: Optional[ConditionalChange] = None,
    changed: bool = False,
) -> int:
    """
    Adds geospatial members with their positions to the sorted set stored at `key`,
    updating the position of any member that is already present.

    See https://valkey.io/commands/geoadd for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members_geospatialdata (Mapping[TEncodable, GeospatialData]): Maps each member name to its
            position. See `GeospatialData`. The command reports an error when coordinates fall
            outside the valid ranges.
        existing_options (Optional[ConditionalChange]): Options for handling existing members.
            - NX: Only add new elements.
            - XX: Only update existing elements.
        changed (bool): If True, the return value is the number of changed elements instead of
            the number of newly added elements.

    Returns:
        int: The number of elements added to the sorted set.
        If `changed` is set, the number of elements updated in the sorted set.

    Examples:
        >>> client.geoadd("my_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        2 # Two elements were added to the sorted set "my_sorted_set".
        >>> client.geoadd("my_sorted_set", {"Palermo": GeospatialData(14.361389, 38.115556)}, existing_options=ConditionalChange.XX, changed=True)
        1 # The position of an existing member was updated.
    """
    cmd_args: List[TEncodable] = [key]
    if existing_options:
        cmd_args.append(existing_options.value)
    if changed:
        cmd_args.append("CH")

    # GEOADD expects triples of "longitude latitude member"; only the
    # coordinates are stringified, the member is passed through unchanged.
    for member, position in members_geospatialdata.items():
        cmd_args.extend([str(position.longitude), str(position.latitude), member])

    return cast(int, self._execute_command(RequestType.GeoAdd, cmd_args))
+
def geodist(
    self,
    key: TEncodable,
    member1: TEncodable,
    member2: TEncodable,
    unit: Optional[GeoUnit] = None,
) -> Optional[float]:
    """
    Returns the distance between two members of the geospatial index stored at `key`.

    See https://valkey.io/commands/geodist for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member1 (TEncodable): The name of the first member.
        member2 (TEncodable): The name of the second member.
        unit (Optional[GeoUnit]): The unit of distance measurement. See `GeoUnit`.
            Defaults to `METERS` when not specified.

    Returns:
        Optional[float]: The distance between `member1` and `member2`.
        Returns None when one or both members do not exist, or when the key does not exist.

    Examples:
        >>> client.geoadd("my_geo_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        >>> client.geodist("my_geo_set", "Palermo", "Catania")
        166274.1516 # Distance between "Palermo" and "Catania" in meters.
        >>> client.geodist("my_geo_set", "Palermo", "Catania", unit=GeoUnit.KILOMETERS)
        166.2742 # Distance between "Palermo" and "Catania" in kilometers.
        >>> client.geodist("my_geo_set", "non-existing", "Palermo", unit=GeoUnit.KILOMETERS)
        None # A non-existing member yields None.
    """
    cmd_args: List[TEncodable] = [key, member1, member2]
    if unit is not None:
        cmd_args.append(unit.value)

    return cast(
        Optional[float],
        self._execute_command(RequestType.GeoDist, cmd_args),
    )
+
def geohash(
    self, key: TEncodable, members: List[TEncodable]
) -> List[Optional[bytes]]:
    """
    Returns the GeoHash bytes strings representing the positions of the given members of
    the sorted set stored at `key`.

    See https://valkey.io/commands/geohash for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members (List[TEncodable]): The members whose GeoHash bytes strings are requested.

    Returns:
        List[Optional[bytes]]: The GeoHash bytes string for each requested member, in order.
        A member that does not exist in the sorted set is reported as None.

    Examples:
        >>> client.geoadd("my_geo_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        >>> client.geohash("my_geo_sorted_set", ["Palermo", "Catania", "some city"])
        ["sqc8b49rny0", "sqdtr74hyu0", None] # GeoHash bytes strings for the specified members.
    """
    return cast(
        List[Optional[bytes]],
        self._execute_command(RequestType.GeoHash, [key, *members]),
    )
+
def geopos(
    self,
    key: TEncodable,
    members: List[TEncodable],
) -> List[Optional[List[float]]]:
    """
    Returns the positions (longitude, latitude) of the given members of the geospatial
    index stored at `key`.

    See https://valkey.io/commands/geopos for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members (List[TEncodable]): The members whose positions are requested.

    Returns:
        List[Optional[List[float]]]: The position (longitude and latitude) of each requested
        member, in order. A member that does not exist is reported as None.

    Example:
        >>> client.geoadd("my_geo_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        >>> client.geopos("my_geo_sorted_set", ["Palermo", "Catania", "NonExisting"])
        [[13.36138933897018433, 38.11555639549629859], [15.08726745843887329, 37.50266842333162032], None]
    """
    return cast(
        List[Optional[List[float]]],
        self._execute_command(RequestType.GeoPos, [key, *members]),
    )
+
def geosearch(
    self,
    key: TEncodable,
    search_from: Union[str, bytes, GeospatialData],
    search_by: Union[GeoSearchByRadius, GeoSearchByBox],
    order_by: Optional[OrderBy] = None,
    count: Optional[GeoSearchCount] = None,
    with_coord: bool = False,
    with_dist: bool = False,
    with_hash: bool = False,
) -> List[Union[bytes, List[Union[bytes, float, int, List[float]]]]]:
    """
    Searches the geospatial sorted set stored at `key` for members inside a circular or
    rectangular area.

    See https://valkey.io/commands/geosearch/ for more details.

    Args:
        key (TEncodable): The key of the sorted set representing geospatial data.
        search_from (Union[str, bytes, GeospatialData]): The center of the search. Either an
            existing member of the sorted set or an explicit position (see `GeospatialData`).
        search_by (Union[GeoSearchByRadius, GeoSearchByBox]): The search criteria.
            For circular area search, see `GeoSearchByRadius`.
            For rectangular area search, see `GeoSearchByBox`.
        order_by (Optional[OrderBy]): The order in which results are returned.
            - `ASC`: Nearest to farthest, relative to the center point.
            - `DESC`: Farthest to nearest, relative to the center point.
            If not specified, the results are unsorted.
        count (Optional[GeoSearchCount]): The maximum number of results to return.
            See `GeoSearchCount`. If not specified, all results are returned.
        with_coord (bool): Whether to include coordinates of the returned items. Defaults to False.
        with_dist (bool): Whether to include distance from the center in the returned items.
            The distance is returned in the same unit as specified in `search_by`. Defaults to False.
        with_hash (bool): Whether to include the geohash of the returned items. Defaults to False.

    Returns:
        List[Union[bytes, List[Union[bytes, float, int, List[float]]]]]: By default, a list of
        member (location) names.
        If any of `with_coord`, `with_dist` or `with_hash` are True, an array of arrays, where
        each sub array represents a single item in the following order:
            (bytes): The member (location) name.
            (float): The distance from the center, in the unit of the search shape, if `with_dist` is True.
            (int): The geohash integer, if `with_hash` is True.
            List[float]: The coordinates as a two item [longitude, latitude] array, if `with_coord` is True.

    Examples:
        >>> client.geoadd("my_geo_sorted_set", {"edge1": GeospatialData(12.758489, 38.788135), "edge2": GeospatialData(17.241510, 38.788135)})
        >>> client.geoadd("my_geo_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        >>> client.geosearch("my_geo_sorted_set", "Catania", GeoSearchByRadius(175, GeoUnit.MILES), OrderBy.DESC)
        ['Palermo', 'Catania'] # Locations within 175 miles of 'Catania', farthest first.
        >>> client.geosearch("my_geo_sorted_set", GeospatialData(15, 37), GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), OrderBy.DESC, with_coord=True, with_dist=True, with_hash=True)
        [
            [
                b"Catania",
                [56.4413, 3479447370796909, [15.087267458438873, 37.50266842333162]],
            ],
            [
                b"Palermo",
                [190.4424, 3479099956230698, [13.361389338970184, 38.1155563954963]],
            ],
            [
                b"edge2",
                [279.7403, 3481342659049484, [17.241510450839996, 38.78813451624225]],
            ],
            [
                b"edge1",
                [279.7405, 3479273021651468, [12.75848776102066, 38.78813451624225]],
            ],
        ] # Locations within a 400 km square box around the center, with dist, hash and coords.

    Since: Valkey version 6.2.0.
    """
    # Argument assembly (FROMMEMBER/FROMLONLAT, BYRADIUS/BYBOX, flags) is
    # shared with geosearchstore via this helper.
    search_args = _create_geosearch_args(
        [key],
        search_from,
        search_by,
        order_by,
        count,
        with_coord,
        with_dist,
        with_hash,
    )
    return cast(
        List[Union[bytes, List[Union[bytes, float, int, List[float]]]]],
        self._execute_command(RequestType.GeoSearch, search_args),
    )
+
def geosearchstore(
    self,
    destination: TEncodable,
    source: TEncodable,
    search_from: Union[str, bytes, GeospatialData],
    search_by: Union[GeoSearchByRadius, GeoSearchByBox],
    count: Optional[GeoSearchCount] = None,
    store_dist: bool = False,
) -> int:
    """
    Searches the geospatial sorted set stored at `source` for members inside a circular or
    rectangular area and stores the result in `destination`.
    If `destination` already exists, it is overwritten; otherwise a new sorted set is created.

    To get the result directly, see `geosearch`.

    Note:
        When in cluster mode, both `source` and `destination` must map to the same hash slot.

    Args:
        destination (TEncodable): The key to store the search results.
        source (TEncodable): The key of the sorted set representing geospatial data to search from.
        search_from (Union[str, bytes, GeospatialData]): The center of the search. Either an
            existing member of the sorted set or an explicit position (see `GeospatialData`).
        search_by (Union[GeoSearchByRadius, GeoSearchByBox]): The search criteria.
            For circular area search, see `GeoSearchByRadius`.
            For rectangular area search, see `GeoSearchByBox`.
        count (Optional[GeoSearchCount]): The maximum number of results to store.
            See `GeoSearchCount`. If not specified, all results are stored.
        store_dist (bool): Determines what is stored as the sorted set score. Defaults to False.
            - If False, the geohash of the location is stored as the score.
            - If True, the distance from the center of the shape (circle or box) is stored as
              the score, as a floating-point number in the unit specified for that shape.

    Returns:
        int: The number of elements in the resulting sorted set stored at `destination`.

    Examples:
        >>> client.geoadd("my_geo_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)})
        >>> client.geosearchstore("my_dest_sorted_set", "my_geo_sorted_set", "Catania", GeoSearchByRadius(175, GeoUnit.MILES))
        2 # Number of elements stored in "my_dest_sorted_set".
        >>> client.zrange_withscores("my_dest_sorted_set", RangeByIndex(0, -1))
        {b"Palermo": 3479099956230698.0, b"Catania": 3479447370796909.0} # The elements within the search area, with their geohash as score.
        >>> client.geosearchstore("my_dest_sorted_set", "my_geo_sorted_set", GeospatialData(15, 37), GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), store_dist=True)
        2 # Number of elements stored in "my_dest_sorted_set", with distance as score.
        >>> client.zrange_withscores("my_dest_sorted_set", RangeByIndex(0, -1))
        {b"Catania": 56.4412578701582, b"Palermo": 190.44242984775784} # The elements within the search area, with the distance as score.

    Since: Valkey version 6.2.0.
    """
    search_args = _create_geosearch_args(
        [destination, source],
        search_from,
        search_by,
        None,  # no ordering option when storing
        count,
        False,  # with_coord — not applicable to the STORE variant
        False,  # with_dist
        False,  # with_hash
        store_dist,
    )
    return cast(
        int,
        self._execute_command(RequestType.GeoSearchStore, search_args),
    )
+
def zadd(
    self,
    key: TEncodable,
    members_scores: Mapping[TEncodable, float],
    existing_options: Optional[ConditionalChange] = None,
    update_condition: Optional[UpdateOptions] = None,
    changed: bool = False,
) -> int:
    """
    Adds members with their scores to the sorted set stored at `key`.
    If a member is already a part of the sorted set, its score is updated.

    See https://valkey.io/commands/zadd/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members_scores (Mapping[TEncodable, float]): A mapping of members to their corresponding scores.
        existing_options (Optional[ConditionalChange]): Options for handling existing members.
            - NX: Only add new elements.
            - XX: Only update existing elements.
        update_condition (Optional[UpdateOptions]): Options for updating scores.
            - GT: Only update scores greater than the current values.
            - LT: Only update scores less than the current values.
        changed (bool): Modify the return value to return the number of changed elements,
            instead of the number of new elements added.

    Returns:
        int: The number of elements added to the sorted set.
        If `changed` is set, returns the number of elements updated in the sorted set.

    Raises:
        ValueError: If `existing_options` is NX while `update_condition` is set (GT/LT and NX
            are mutually exclusive).

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 10.5, "member2": 8.2})
        2 # Indicates that two elements have been added to the sorted set "my_sorted_set."
        >>> client.zadd("existing_sorted_set", {"member1": 15.0, "member2": 5.5}, existing_options=ConditionalChange.XX, changed=True)
        2 # Updates the scores of two existing members in the sorted set "existing_sorted_set."
    """
    # Reject the invalid NX + GT/LT combination up front, before building args.
    if existing_options and update_condition:
        if existing_options == ConditionalChange.ONLY_IF_DOES_NOT_EXIST:
            raise ValueError(
                "The GT, LT and NX options are mutually exclusive. "
                f"Cannot choose both {update_condition.value} and NX."
            )

    args: List[TEncodable] = [key]
    if existing_options:
        args.append(existing_options.value)

    if update_condition:
        args.append(update_condition.value)

    if changed:
        args.append("CH")

    # ZADD expects "score member" pairs. Only the score is stringified:
    # passing the member through str() would corrupt bytes members
    # (str(b"m") == "b'm'"), so members are forwarded unchanged, matching
    # how geoadd and zadd_incr handle them.
    for member, score in members_scores.items():
        args.extend([str(score), member])

    return cast(
        int,
        self._execute_command(RequestType.ZAdd, args),
    )
+
def zadd_incr(
    self,
    key: TEncodable,
    member: TEncodable,
    increment: float,
    existing_options: Optional[ConditionalChange] = None,
    update_condition: Optional[UpdateOptions] = None,
) -> Optional[float]:
    """
    Increments the score of `member` in the sorted set stored at `key` by `increment`.
    If `member` does not exist in the sorted set, it is added with `increment` as its score
    (as if its previous score was 0.0).
    If `key` does not exist, a new sorted set with the specified member as its sole member
    is created.

    See https://valkey.io/commands/zadd/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): A member in the sorted set to increment.
        increment (float): The score to increment the member by.
        existing_options (Optional[ConditionalChange]): Options for handling the member's existence.
            - NX: Only increment a member that doesn't exist.
            - XX: Only increment an existing member.
        update_condition (Optional[UpdateOptions]): Options for updating the score.
            - GT: Only increment if the new score will be greater than the current score.
            - LT: Only increment (decrement) if the new score will be less than the current score.

    Returns:
        Optional[float]: The new score of the member.
        If the operation is aborted due to the XX/NX/LT/GT conditions, None is returned.

    Examples:
        >>> client.zadd_incr("my_sorted_set", "member", 5.0)
        5.0
        >>> client.zadd_incr("existing_sorted_set", "member", 3.0, update_condition=UpdateOptions.LESS_THAN)
        None
    """
    cmd_args: List[TEncodable] = [key]
    if existing_options:
        cmd_args.append(existing_options.value)
    if update_condition:
        cmd_args.append(update_condition.value)
    cmd_args.append("INCR")

    # NX cannot be combined with GT/LT — the server rejects this combination.
    if existing_options and update_condition:
        if existing_options == ConditionalChange.ONLY_IF_DOES_NOT_EXIST:
            raise ValueError(
                "The GT, LT and NX options are mutually exclusive. "
                f"Cannot choose both {update_condition.value} and NX."
            )

    cmd_args.extend([str(increment), member])
    return cast(
        Optional[float],
        self._execute_command(RequestType.ZAdd, cmd_args),
    )
+
def zcard(self, key: TEncodable) -> int:
    """
    Returns the cardinality (number of elements) of the sorted set stored at `key`.

    See https://valkey.io/commands/zcard/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.

    Returns:
        int: The number of elements in the sorted set.
        A non-existing `key` is treated as an empty sorted set, yielding 0.

    Examples:
        >>> client.zcard("my_sorted_set")
        3 # There are 3 elements in the sorted set "my_sorted_set".
        >>> client.zcard("non_existing_key")
        0
    """
    return cast(
        int,
        self._execute_command(RequestType.ZCard, [key]),
    )
+
def zcount(
    self,
    key: TEncodable,
    min_score: Union[InfBound, ScoreBoundary],
    max_score: Union[InfBound, ScoreBoundary],
) -> int:
    """
    Returns the number of members in the sorted set stored at `key` with scores between
    `min_score` and `max_score`.

    See https://valkey.io/commands/zcount/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        min_score (Union[InfBound, ScoreBoundary]): The minimum score to count from.
            Either an InfBound (positive/negative infinity) or a ScoreBoundary
            (a specific score with inclusivity).
        max_score (Union[InfBound, ScoreBoundary]): The maximum score to count up to.
            Either an InfBound (positive/negative infinity) or a ScoreBoundary
            (a specific score with inclusivity).

    Returns:
        int: The number of members in the specified score range.
        A non-existing `key` is treated as an empty sorted set, yielding 0.
        If `max_score` < `min_score`, 0 is returned.

    Examples:
        >>> client.zcount("my_sorted_set", ScoreBoundary(5.0, is_inclusive=True), InfBound.POS_INF)
        2 # Two members have scores between 5.0 (inclusive) and +inf in "my_sorted_set".
        >>> client.zcount("my_sorted_set", ScoreBoundary(5.0, is_inclusive=True), ScoreBoundary(10.0, is_inclusive=False))
        1 # One member has 5.0 <= score < 10.0 in "my_sorted_set".
    """

    def to_score_arg(bound: Union[InfBound, ScoreBoundary]) -> TEncodable:
        # InfBound members carry a per-command argument map; ScoreBoundary
        # exposes the rendered score directly.
        if isinstance(bound, InfBound):
            return bound.value["score_arg"]
        return bound.value

    return cast(
        int,
        self._execute_command(
            RequestType.ZCount,
            [key, to_score_arg(min_score), to_score_arg(max_score)],
        ),
    )
+
def zincrby(
    self, key: TEncodable, increment: float, member: TEncodable
) -> float:
    """
    Increments the score of `member` in the sorted set stored at `key` by `increment`.
    If `member` does not exist, it is added with `increment` as its score.
    If `key` does not exist, a new sorted set is created with `member` as its sole member.

    See https://valkey.io/commands/zincrby/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        increment (float): The score increment.
        member (TEncodable): A member of the sorted set.

    Returns:
        float: The new score of `member`.

    Examples:
        >>> client.zadd("my_sorted_set", {"member": 10.5, "member2": 8.2})
        >>> client.zincrby("my_sorted_set", 1.2, "member")
        11.7 # The member existed before; its new score is 11.7.
        >>> client.zincrby("my_sorted_set", -1.7, "member")
        10.0 # A negative increment decrements the score.
        >>> client.zincrby("my_sorted_set", 5.5, "non_existing_member")
        5.5 # A new member is added with score 5.5.
    """
    cmd_args: List[TEncodable] = [key, str(increment), member]
    return cast(
        float,
        self._execute_command(RequestType.ZIncrBy, cmd_args),
    )
+
def zpopmax(
    self, key: TEncodable, count: Optional[int] = None
) -> Mapping[bytes, float]:
    """
    Removes and returns the members with the highest scores from the sorted set stored at `key`.
    If `count` is provided, up to `count` members with the highest scores are removed and returned.
    Otherwise, only one member with the highest score is removed and returned.

    See https://valkey.io/commands/zpopmax for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        count (Optional[int]): Specifies the quantity of members to pop. If not specified, pops one member.
            If `count` is higher than the sorted set's cardinality, returns all members and their
            scores, ordered from highest to lowest.

    Returns:
        Mapping[bytes, float]: A map of the removed members and their scores, ordered from the one
        with the highest score to the one with the lowest.
        If `key` doesn't exist, it will be treated as an empty sorted set and the command returns
        an empty map.

    Examples:
        >>> client.zpopmax("my_sorted_set")
        {b'member1': 10.0} # 'member1' with a score of 10.0 was removed from the sorted set.
        >>> client.zpopmax("my_sorted_set", 2)
        {b'member2': 8.0, b'member3': 7.5} # 'member2' and 'member3' were removed from the sorted set.
    """
    # Compare against None explicitly: `count=0` is a valid explicit count
    # (pops nothing) — a plain truthiness check would silently drop it and
    # pop one member instead.
    args: List[TEncodable] = [key] if count is None else [key, str(count)]
    return cast(
        Mapping[bytes, float],
        self._execute_command(RequestType.ZPopMax, args),
    )
+
def bzpopmax(
    self, keys: List[TEncodable], timeout: float
) -> Optional[List[Union[bytes, float]]]:
    """
    Pops the member with the highest score from the first non-empty sorted set, checking
    the given keys in the order they are given. Blocks the connection while every given
    sorted set is empty.

    When in cluster mode, all keys must map to the same hash slot.

    `BZPOPMAX` is the blocking variant of `ZPOPMAX`.

    `BZPOPMAX` is a client blocking command, see
    https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands
    for more details and best practices.

    See https://valkey.io/commands/bzpopmax for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.
        timeout (float): The number of seconds to wait for a blocking operation to complete.
            A value of 0 will block indefinitely.

    Returns:
        Optional[List[Union[bytes, float]]]: An array containing the key where the member was
        popped out, the member itself, and the member score. Returns None if no member could
        be popped before the `timeout` expired.

    Examples:
        >>> client.zadd("my_sorted_set1", {"member1": 10.0, "member2": 5.0})
        2 # Two elements have been added to the sorted set at "my_sorted_set1".
        >>> client.bzpopmax(["my_sorted_set1", "my_sorted_set2"], 0.5)
        [b'my_sorted_set1', b'member1', 10.0] # "member1" with a score of 10.0 was removed from "my_sorted_set1".
    """
    cmd_args: List[TEncodable] = [*keys, str(timeout)]
    return cast(
        Optional[List[Union[bytes, float]]],
        self._execute_command(RequestType.BZPopMax, cmd_args),
    )
+
def zpopmin(
    self, key: TEncodable, count: Optional[int] = None
) -> Mapping[bytes, float]:
    """
    Removes and returns the members with the lowest scores from the sorted set stored at `key`.
    If `count` is provided, up to `count` members with the lowest scores are removed and returned.
    Otherwise, only one member with the lowest score is removed and returned.

    See https://valkey.io/commands/zpopmin for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        count (Optional[int]): Specifies the quantity of members to pop. If not specified, pops one member.
            If `count` is higher than the sorted set's cardinality, returns all members and their scores.

    Returns:
        Mapping[bytes, float]: A map of the removed members and their scores, ordered from the one
        with the lowest score to the one with the highest.
        If `key` doesn't exist, it will be treated as an empty sorted set and the command returns
        an empty map.

    Examples:
        >>> client.zpopmin("my_sorted_set")
        {b'member1': 5.0} # 'member1' with a score of 5.0 was removed from the sorted set.
        >>> client.zpopmin("my_sorted_set", 2)
        {b'member3': 7.5 , b'member2': 8.0} # 'member3' and 'member2' were removed from the sorted set.
    """
    # Compare against None explicitly: `count=0` is a valid explicit count
    # (pops nothing) — a plain truthiness check would silently drop it and
    # pop one member instead.
    args: List[TEncodable] = [key] if count is None else [key, str(count)]
    return cast(
        Mapping[bytes, float],
        self._execute_command(RequestType.ZPopMin, args),
    )
+
def bzpopmin(
    self, keys: List[TEncodable], timeout: float
) -> Optional[List[Union[bytes, float]]]:
    """
    Pops the member with the lowest score from the first non-empty sorted set, checking
    the given keys in the order they are given. Blocks the connection while every given
    sorted set is empty.

    When in cluster mode, all keys must map to the same hash slot.

    `BZPOPMIN` is the blocking variant of `ZPOPMIN`.

    `BZPOPMIN` is a client blocking command, see
    https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands
    for more details and best practices.

    See https://valkey.io/commands/bzpopmin for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.
        timeout (float): The number of seconds to wait for a blocking operation to complete.
            A value of 0 will block indefinitely.

    Returns:
        Optional[List[Union[bytes, float]]]: An array containing the key where the member was
        popped out, the member itself, and the member score. Returns None if no member could
        be popped before the `timeout` expired.

    Examples:
        >>> client.zadd("my_sorted_set1", {"member1": 10.0, "member2": 5.0})
        2 # Two elements have been added to the sorted set at "my_sorted_set1".
        >>> client.bzpopmin(["my_sorted_set1", "my_sorted_set2"], 0.5)
        [b'my_sorted_set1', b'member2', 5.0] # "member2" with a score of 5.0 was removed from "my_sorted_set1".
    """
    return cast(
        Optional[List[Union[bytes, float]]],
        self._execute_command(RequestType.BZPopMin, [*keys, str(timeout)]),
    )
+
def zrange(
    self,
    key: TEncodable,
    range_query: Union[RangeByIndex, RangeByLex, RangeByScore],
    reverse: bool = False,
) -> List[bytes]:
    """
    Returns the specified range of elements in the sorted set stored at `key`.

    ZRANGE can perform different types of range queries: by index (rank), by score, or by
    lexicographical order.

    See https://valkey.io/commands/zrange/ for more details.

    To get the elements with their scores, see `zrange_withscores`.

    Args:
        key (TEncodable): The key of the sorted set.
        range_query (Union[RangeByIndex, RangeByLex, RangeByScore]): The range query object
            selecting the type of range query to perform.
            - For range queries by index (rank), use RangeByIndex.
            - For range queries by lexicographical order, use RangeByLex.
            - For range queries by score, use RangeByScore.
        reverse (bool): If True, reverses the sorted set, with index 0 as the element with the
            highest score.

    Returns:
        List[bytes]: A list of elements within the specified range.
        A non-existing `key` is treated as an empty sorted set, yielding an empty array.

    Examples:
        >>> client.zrange("my_sorted_set", RangeByIndex(0, -1))
        [b'member1', b'member2', b'member3'] # All members, in ascending order.
        >>> client.zrange("my_sorted_set", RangeByScore(InfBound.NEG_INF, ScoreBoundary(3)))
        [b'member2', b'member3'] # Members with scores from -inf up to 3, in ascending order.
    """
    range_args = _create_zrange_args(key, range_query, reverse, with_scores=False)
    return cast(
        List[bytes],
        self._execute_command(RequestType.ZRange, range_args),
    )
+
def zrange_withscores(
    self,
    key: TEncodable,
    range_query: Union[RangeByIndex, RangeByScore],
    reverse: bool = False,
) -> Mapping[bytes, float]:
    """
    Returns the specified range of elements with their scores in the sorted set stored at `key`.
    Similar to ZRANGE but with the WITHSCORES flag.

    See https://valkey.io/commands/zrange/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        range_query (Union[RangeByIndex, RangeByScore]): The range query object selecting the
            type of range query to perform.
            - For range queries by index (rank), use RangeByIndex.
            - For range queries by score, use RangeByScore.
        reverse (bool): If True, reverses the sorted set, with index 0 as the element with the
            highest score.

    Returns:
        Mapping[bytes, float]: A map of elements to their scores within the specified range.
        A non-existing `key` is treated as an empty sorted set, yielding an empty map.

    Examples:
        >>> client.zrange_withscores("my_sorted_set", RangeByScore(ScoreBoundary(10), ScoreBoundary(20)))
        {b'member1': 10.5, b'member2': 15.2} # Members with scores between 10 and 20, with their scores.
        >>> client.zrange_withscores("my_sorted_set", RangeByScore(InfBound.NEG_INF, ScoreBoundary(3)))
        {b'member4': -2.0, b'member7': 1.5} # Members with scores from -inf up to 3, with their scores.
    """
    range_args = _create_zrange_args(key, range_query, reverse, with_scores=True)
    return cast(
        Mapping[bytes, float],
        self._execute_command(RequestType.ZRange, range_args),
    )
+
def zrangestore(
    self,
    destination: TEncodable,
    source: TEncodable,
    range_query: Union[RangeByIndex, RangeByLex, RangeByScore],
    reverse: bool = False,
) -> int:
    """
    Stores a specified range of elements from the sorted set at `source` into a new sorted set
    at `destination`. If `destination` doesn't exist, a new sorted set is created; if it exists,
    it is overwritten.

    ZRANGESTORE can perform different types of range queries: by index (rank), by score, or by
    lexicographical order.

    See https://valkey.io/commands/zrangestore for more details.

    Note:
        When in Cluster mode, `source` and `destination` must map to the same hash slot.

    Args:
        destination (TEncodable): The key for the destination sorted set.
        source (TEncodable): The key of the source sorted set.
        range_query (Union[RangeByIndex, RangeByLex, RangeByScore]): The range query object
            selecting the type of range query to perform.
            - For range queries by index (rank), use RangeByIndex.
            - For range queries by lexicographical order, use RangeByLex.
            - For range queries by score, use RangeByScore.
        reverse (bool): If True, reverses the sorted set, with index 0 as the element with the
            highest score.

    Returns:
        int: The number of elements in the resulting sorted set.

    Examples:
        >>> client.zrangestore("destination_key", "my_sorted_set", RangeByIndex(0, 2), True)
        3 # The 3 highest-scoring members of "my_sorted_set" were stored at "destination_key".
        >>> client.zrangestore("destination_key", "my_sorted_set", RangeByScore(InfBound.NEG_INF, ScoreBoundary(3)))
        2 # The 2 members with scores from -inf up to 3 (inclusive) were stored at "destination_key".
    """
    # The helper's trailing positional arguments are with_scores (always False
    # for the STORE variant) and the destination key.
    range_args = _create_zrange_args(source, range_query, reverse, False, destination)
    return cast(
        int,
        self._execute_command(RequestType.ZRangeStore, range_args),
    )
+
def zrank(
    self,
    key: TEncodable,
    member: TEncodable,
) -> Optional[int]:
    """
    Returns the rank of `member` in the sorted set stored at `key`, with scores ordered from
    low to high.

    See https://valkey.io/commands/zrank for more details.

    To get the rank of `member` together with its score, see `zrank_withscore`.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): The member whose rank is to be retrieved.

    Returns:
        Optional[int]: The rank of `member` in the sorted set.
        Returns None when `key` doesn't exist or `member` is not present in the set.

    Examples:
        >>> client.zrank("my_sorted_set", "member2")
        1 # "member2" has the second-lowest score in the sorted set "my_sorted_set".
        >>> client.zrank("my_sorted_set", "non_existing_member")
        None # "non_existing_member" is not present in the sorted set "my_sorted_set".
    """
    return cast(
        Optional[int],
        self._execute_command(RequestType.ZRank, [key, member]),
    )
+
def zrank_withscore(
    self,
    key: TEncodable,
    member: TEncodable,
) -> Optional[List[Union[int, float]]]:
    """
    Returns both the rank and the score of `member` in the sorted set at `key`,
    with members ordered from lowest to highest score.

    See https://valkey.io/commands/zrank for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): The member to look up.

    Returns:
        Optional[List[Union[int, float]]]: A two-element list `[rank, score]` for `member`.
            None if `key` does not exist or `member` is not part of the set.

    Examples:
        >>> client.zrank_withscore("my_sorted_set", "member2")
            [1, 6.0]  # "member2" (score 6.0) holds the second-lowest score.
        >>> client.zrank_withscore("my_sorted_set", "non_existing_member")
            None  # "non_existing_member" is not present in "my_sorted_set".

    Since: Valkey version 7.2.0.
    """
    command_args: List[TEncodable] = [key, member, "WITHSCORE"]
    response = self._execute_command(RequestType.ZRank, command_args)
    return cast(Optional[List[Union[int, float]]], response)
+
def zrevrank(self, key: TEncodable, member: TEncodable) -> Optional[int]:
    """
    Returns the zero-based rank of `member` in the sorted set at `key`, with members
    ordered from the highest score to the lowest.

    Use `zrevrank_withscore` to also retrieve the member's score along with its rank.

    See https://valkey.io/commands/zrevrank for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): The member to look up.

    Returns:
        Optional[int]: The rank of `member`, where ranks are assigned from the highest
            score downwards.
            `None` if `key` does not exist or `member` is not part of the set.

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 10.5, "member2": 8.2, "member3": 9.6})
        >>> client.zrevrank("my_sorted_set", "member2")
            2  # "member2" holds the third-highest score in "my_sorted_set".
    """
    command_args: List[TEncodable] = [key, member]
    response = self._execute_command(RequestType.ZRevRank, command_args)
    return cast(Optional[int], response)
+
def zrevrank_withscore(
    self, key: TEncodable, member: TEncodable
) -> Optional[List[Union[int, float]]]:
    """
    Returns the rank of `member` in the sorted set stored at `key` with its score, where scores are ordered from the
    highest to lowest, starting from `0`.

    See https://valkey.io/commands/zrevrank for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): The member whose rank is to be retrieved.

    Returns:
        Optional[List[Union[int, float]]]: A list containing the rank (as `int`) and score (as `float`) of `member`
            in the sorted set, where ranks are ordered from high to low based on scores.
            If `key` doesn't exist, or if `member` is not present in the set, `None` will be returned.

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 10.5, "member2": 8.2, "member3": 9.6})
        >>> client.zrevrank_withscore("my_sorted_set", "member2")
            [2, 8.2]  # "member2" with score 8.2 has the third-highest score in the sorted set "my_sorted_set"

    Since: Valkey version 7.2.0.
    """
    # "WITHSCORE" makes the server return [rank, score] instead of the bare rank.
    return cast(
        Optional[List[Union[int, float]]],
        self._execute_command(
            RequestType.ZRevRank, [key, member, "WITHSCORE"]
        ),
    )
+
def zrem(
    self,
    key: TEncodable,
    members: List[TEncodable],
) -> int:
    """
    Removes the given members from the sorted set stored at `key`.
    Members that do not exist in the set are silently ignored.

    See https://valkey.io/commands/zrem/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members (List[TEncodable]): The members to remove.

    Returns:
        int: How many members were actually removed (non-existing members not counted).
            Returns 0 when `key` does not exist (treated as an empty sorted set).

    Examples:
        >>> client.zrem("my_sorted_set", ["member1", "member2"])
            2  # Two members were removed from "my_sorted_set".
        >>> client.zrem("non_existing_sorted_set", ["member1", "member2"])
            0  # Nothing removed; "non_existing_sorted_set" does not exist.
    """
    command_args: List[TEncodable] = [key, *members]
    response = self._execute_command(RequestType.ZRem, command_args)
    return cast(int, response)
+
def zremrangebyscore(
    self,
    key: TEncodable,
    min_score: Union[InfBound, ScoreBoundary],
    max_score: Union[InfBound, ScoreBoundary],
) -> int:
    """
    Removes all elements in the sorted set stored at `key` with a score between `min_score` and `max_score`.

    See https://valkey.io/commands/zremrangebyscore/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        min_score (Union[InfBound, ScoreBoundary]): The minimum score to remove from.
            Can be an instance of InfBound representing positive/negative infinity,
            or ScoreBoundary representing a specific score and inclusivity.
        max_score (Union[InfBound, ScoreBoundary]): The maximum score to remove up to.
            Can be an instance of InfBound representing positive/negative infinity,
            or ScoreBoundary representing a specific score and inclusivity.

    Returns:
        int: The number of members that were removed from the sorted set.
            If `key` does not exist, it is treated as an empty sorted set, and the command returns 0.
            If `min_score` is greater than `max_score`, 0 is returned.

    Examples:
        >>> client.zremrangebyscore("my_sorted_set", ScoreBoundary(5.0, is_inclusive=True), InfBound.POS_INF)
            2  # 2 members with scores between 5.0 (inclusive) and +inf were removed from "my_sorted_set".
        >>> client.zremrangebyscore("non_existing_sorted_set", ScoreBoundary(5.0, is_inclusive=True), ScoreBoundary(10.0, is_inclusive=False))
            0  # No members were removed, as "non_existing_sorted_set" does not exist.
    """
    # InfBound carries its wire token under the "score_arg" key, while
    # ScoreBoundary exposes the formatted score directly via `.value`.
    score_min = (
        min_score.value["score_arg"]
        if isinstance(min_score, InfBound)
        else min_score.value
    )
    score_max = (
        max_score.value["score_arg"]
        if isinstance(max_score, InfBound)
        else max_score.value
    )

    return cast(
        int,
        self._execute_command(
            RequestType.ZRemRangeByScore, [key, score_min, score_max]
        ),
    )
+
def zremrangebylex(
    self,
    key: TEncodable,
    min_lex: Union[InfBound, LexBoundary],
    max_lex: Union[InfBound, LexBoundary],
) -> int:
    """
    Removes every element in the sorted set at `key` whose lexicographical value lies
    between `min_lex` and `max_lex`.

    See https://valkey.io/commands/zremrangebylex/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        min_lex (Union[InfBound, LexBoundary]): Lower bound of the lexicographical range.
            Either an `InfBound` (positive/negative infinity) or a `LexBoundary`
            carrying a specific value and its inclusivity.
        max_lex (Union[InfBound, LexBoundary]): Upper bound of the lexicographical range.
            Either an `InfBound` (positive/negative infinity) or a `LexBoundary`
            carrying a specific value and its inclusivity.

    Returns:
        int: The number of members removed.
            Returns `0` when `key` does not exist (treated as an empty sorted set),
            or when `min_lex` is greater than `max_lex`.

    Examples:
        >>> client.zremrangebylex("my_sorted_set", LexBoundary("a", is_inclusive=False), LexBoundary("e"))
            4  # 4 members between "a" (exclusive) and "e" (inclusive) were removed.
        >>> client.zremrangebylex("non_existing_sorted_set", InfBound.NEG_INF, LexBoundary("e"))
            0  # Nothing removed; "non_existing_sorted_set" does not exist.
    """
    # InfBound wraps its wire token in a dict keyed by "lex_arg"; LexBoundary
    # exposes it directly through `.value`.
    if type(min_lex) == InfBound:
        lower = min_lex.value["lex_arg"]
    else:
        lower = min_lex.value
    if type(max_lex) == InfBound:
        upper = max_lex.value["lex_arg"]
    else:
        upper = max_lex.value

    response = self._execute_command(
        RequestType.ZRemRangeByLex, [key, lower, upper]
    )
    return cast(int, response)
+
def zremrangebyrank(
    self,
    key: TEncodable,
    start: int,
    end: int,
) -> int:
    """
    Removes every element in the sorted set at `key` whose rank falls between `start`
    and `end`. Both bounds are zero-based, with index 0 being the lowest-scored element;
    negative indexes count back from the highest-scored element.

    See https://valkey.io/commands/zremrangebyrank/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        start (int): The first rank of the range.
        end (int): The last rank of the range.

    Returns:
        int: The number of elements removed.
            Returns `0` when `start` is past the end of the set, `start` > `end`,
            or `key` does not exist. When `end` is past the actual end of the set,
            the range stops at the set's last element.

    Examples:
        >>> client.zremrangebyrank("my_sorted_set", 0, 4)
            5  # Elements with ranks 0 through 4 (inclusive) were removed.
        >>> client.zremrangebyrank("my_sorted_set", 0, 4)
            0  # Nothing left to remove.
    """
    command_args: List[TEncodable] = [key, str(start), str(end)]
    response = self._execute_command(RequestType.ZRemRangeByRank, command_args)
    return cast(int, response)
+
def zlexcount(
    self,
    key: TEncodable,
    min_lex: Union[InfBound, LexBoundary],
    max_lex: Union[InfBound, LexBoundary],
) -> int:
    """
    Counts the members of the sorted set at `key` whose lexicographical values lie
    between `min_lex` and `max_lex`.

    See https://valkey.io/commands/zlexcount/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        min_lex (Union[InfBound, LexBoundary]): Lower lexicographical bound.
            Either an InfBound (positive/negative infinity) or a LexBoundary
            carrying a specific value and its inclusivity.
        max_lex (Union[InfBound, LexBoundary]): Upper lexicographical bound.
            Either an InfBound (positive/negative infinity) or a LexBoundary
            carrying a specific value and its inclusivity.

    Returns:
        int: The number of members inside the given lexicographical range.
            Returns `0` when `key` does not exist (treated as an empty sorted set),
            or when `max_lex < min_lex`.

    Examples:
        >>> client.zlexcount("my_sorted_set", LexBoundary("c", is_inclusive=True), InfBound.POS_INF)
            2  # 2 members lie between "c" (inclusive) and positive infinity.
        >>> client.zlexcount("my_sorted_set", LexBoundary("c", is_inclusive=True), LexBoundary("k", is_inclusive=False))
            1  # One member satisfies "c" <= value < "k".
    """
    # InfBound wraps its wire token in a dict keyed by "lex_arg"; LexBoundary
    # exposes it directly through `.value`.
    if type(min_lex) == InfBound:
        lower = min_lex.value["lex_arg"]
    else:
        lower = min_lex.value
    if type(max_lex) == InfBound:
        upper = max_lex.value["lex_arg"]
    else:
        upper = max_lex.value

    response = self._execute_command(RequestType.ZLexCount, [key, lower, upper])
    return cast(int, response)
+
def zscore(self, key: TEncodable, member: TEncodable) -> Optional[float]:
    """
    Returns the score of `member` in the sorted set stored at `key`.

    See https://valkey.io/commands/zscore/ for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        member (TEncodable): The member whose score to fetch.

    Returns:
        Optional[float]: The member's score.
            None when either `key` does not exist or `member` is not part of the set.

    Examples:
        >>> client.zscore("my_sorted_set", "member")
            10.5  # "member" has score 10.5 in "my_sorted_set".
        >>> client.zscore("my_sorted_set", "non_existing_member")
            None
    """
    command_args: List[TEncodable] = [key, member]
    response = self._execute_command(RequestType.ZScore, command_args)
    return cast(Optional[float], response)
+
def zmscore(
    self,
    key: TEncodable,
    members: List[TEncodable],
) -> List[Optional[float]]:
    """
    Returns the scores of the given `members` in the sorted set stored at `key`.

    See https://valkey.io/commands/zmscore for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        members (List[TEncodable]): The members whose scores to fetch.

    Returns:
        List[Optional[float]]: Scores in the same order as `members`; an entry is
            None when the corresponding member is not part of the sorted set.

    Examples:
        >>> client.zmscore("my_sorted_set", ["one", "non_existent_member", "three"])
            [1.0, None, 3.0]
    """
    command_args: List[TEncodable] = [key, *members]
    response = self._execute_command(RequestType.ZMScore, command_args)
    return cast(List[Optional[float]], response)
+
def zdiff(self, keys: List[TEncodable]) -> List[bytes]:
    """
    Returns the difference between the first sorted set and all the successive sorted sets.
    To get the elements with their scores, see `zdiff_withscores`.

    When in Cluster mode, all keys must map to the same hash slot.

    See https://valkey.io/commands/zdiff for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.

    Returns:
        List[bytes]: A list of elements representing the difference between the sorted sets.
            If the first key does not exist, it is treated as an empty sorted set, and the command returns an
            empty list.

    Examples:
        >>> client.zadd("sorted_set1", {"element1": 1.0, "element2": 2.0, "element3": 3.0})
        >>> client.zadd("sorted_set2", {"element2": 2.0})
        >>> client.zadd("sorted_set3", {"element3": 3.0})
        >>> client.zdiff(["sorted_set1", "sorted_set2", "sorted_set3"])
            [b"element1"]  # "element1" is in "sorted_set1" but not "sorted_set2" or "sorted_set3".
    """
    # ZDIFF requires the key count as its first argument.
    args: List[TEncodable] = [str(len(keys))]
    args.extend(keys)
    return cast(
        List[bytes],
        self._execute_command(RequestType.ZDiff, args),
    )
+
def zdiff_withscores(self, keys: List[TEncodable]) -> Mapping[bytes, float]:
    """
    Returns the difference between the first sorted set and all the successive sorted sets, with the associated scores.
    When in Cluster mode, all keys must map to the same hash slot.

    See https://valkey.io/commands/zdiff for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.

    Returns:
        Mapping[bytes, float]: A mapping of elements and their scores representing the difference between the sorted
            sets.
            If the first `key` does not exist, it is treated as an empty sorted set, and the command returns an
            empty map.

    Examples:
        >>> client.zadd("sorted_set1", {"element1": 1.0, "element2": 2.0, "element3": 3.0})
        >>> client.zadd("sorted_set2", {"element2": 2.0})
        >>> client.zadd("sorted_set3", {"element3": 3.0})
        >>> client.zdiff_withscores(["sorted_set1", "sorted_set2", "sorted_set3"])
            {b"element1": 1.0}  # "element1" is in "sorted_set1" but not "sorted_set2" or "sorted_set3".
    """
    # ZDIFF requires the key count first; "WITHSCORES" asks for member→score pairs.
    return cast(
        Mapping[bytes, float],
        self._execute_command(
            RequestType.ZDiff, [str(len(keys))] + keys + ["WITHSCORES"]
        ),
    )
+
def zdiffstore(self, destination: TEncodable, keys: List[TEncodable]) -> int:
    """
    Calculates the difference between the first sorted set and all the successive sorted sets at `keys` and stores
    the difference as a sorted set to `destination`, overwriting it if it already exists. Non-existent keys are
    treated as empty sets.

    See https://valkey.io/commands/zdiffstore for more details.

    Note:
        When in Cluster mode, all keys in `keys` and `destination` must map to the same hash slot.

    Args:
        destination (TEncodable): The key for the resulting sorted set.
        keys (List[TEncodable]): The keys of the sorted sets to compare.

    Returns:
        int: The number of members in the resulting sorted set stored at `destination`.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
            2  # Indicates that two elements have been added to the sorted set at "key1".
        >>> client.zadd("key2", {"member1": 10.5})
            1  # Indicates that one element has been added to the sorted set at "key2".
        >>> client.zdiffstore("my_sorted_set", ["key1", "key2"])
            1  # One member exists in "key1" but not "key2", and this member was stored in "my_sorted_set".
        >>> client.zrange("my_sorted_set", RangeByIndex(0, -1))
            [b'member2']  # "member2" is now stored in "my_sorted_set"
    """
    # ZDIFFSTORE arguments: destination, number of source keys, then the keys.
    return cast(
        int,
        self._execute_command(
            RequestType.ZDiffStore, [destination, str(len(keys))] + keys
        ),
    )
+
def zinter(
    self,
    keys: List[TEncodable],
) -> List[bytes]:
    """
    Computes the intersection of the sorted sets at the given `keys` and returns the
    intersecting members.
    See `zinter_withscores` to also retrieve scores, and `zinterstore` to persist the
    result as a sorted set.

    When in cluster mode, all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zinter/ for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.

    Returns:
        List[bytes]: The members present in every input sorted set.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zinter(["key1", "key2"])
            [b'member1']
    """
    # ZINTER takes the key count followed by the keys themselves.
    command_args: List[TEncodable] = [str(len(keys)), *keys]
    response = self._execute_command(RequestType.ZInter, command_args)
    return cast(List[bytes], response)
+
def zinter_withscores(
    self,
    keys: Union[List[TEncodable], List[Tuple[TEncodable, float]]],
    aggregation_type: Optional[AggregationType] = None,
) -> Mapping[bytes, float]:
    """
    Computes the intersection of the sorted sets at the given `keys` and returns the
    intersecting members together with their aggregated scores.
    See `zinter` for members only, and `zinterstore` to persist the result.

    When in cluster mode, all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zinter/ for more details.

    Args:
        keys (Union[List[TEncodable], List[Tuple[TEncodable, float]]]): The sorted-set keys, either:
            List[TEncodable] - plain keys.
            List[Tuple[TEncodable, float]] - keys paired with score-multiplying weights.
        aggregation_type (Optional[AggregationType]): How to combine the scores of
            members present in several sets. See `AggregationType`.

    Returns:
        Mapping[bytes, float]: The intersecting members mapped to their scores.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zinter_withscores(["key1", "key2"])
            {b'member1': 20}  # "member1" with a summed score of 20.
        >>> client.zinter_withscores(["key1", "key2"], AggregationType.MAX)
            {b'member1': 10.5}  # "member1" with the maximum score of 10.5.
    """
    command_args = _create_zinter_zunion_cmd_args(keys, aggregation_type)
    command_args.append("WITHSCORES")
    response = self._execute_command(RequestType.ZInter, command_args)
    return cast(Mapping[bytes, float], response)
+
def zinterstore(
    self,
    destination: TEncodable,
    keys: Union[List[TEncodable], List[Tuple[TEncodable, float]]],
    aggregation_type: Optional[AggregationType] = None,
) -> int:
    """
    Computes the intersection of the sorted sets at the given `keys` and stores the
    result in `destination`, overwriting any existing value there (otherwise a new
    sorted set is created).
    See `zinter_withscores` to obtain the result directly.

    When in cluster mode, `destination` and all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zinterstore/ for more details.

    Args:
        destination (TEncodable): The key of the destination sorted set.
        keys (Union[List[TEncodable], List[Tuple[TEncodable, float]]]): The sorted-set keys, either:
            List[TEncodable] - plain keys.
            List[Tuple[TEncodable, float]] - keys paired with score-multiplying weights.
        aggregation_type (Optional[AggregationType]): How to combine the scores of
            members present in several sets. See `AggregationType`.

    Returns:
        int: The number of elements in the sorted set stored at `destination`.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zinterstore("my_sorted_set", ["key1", "key2"])
            1  # "my_sorted_set" now holds one element.
        >>> client.zrange_withscores("my_sorted_set", RangeByIndex(0, -1))
            {b'member1': 20}  # "member1" stored with summed score 20.
        >>> client.zinterstore("my_sorted_set", ["key1", "key2"], AggregationType.MAX)
            1  # One element, its score being the maximum across the sets.
        >>> client.zrange_withscores("my_sorted_set", RangeByIndex(0, -1))
            {b'member1': 10.5}  # "member1" stored with maximum score 10.5.
    """
    command_args = _create_zinter_zunion_cmd_args(keys, aggregation_type, destination)
    response = self._execute_command(RequestType.ZInterStore, command_args)
    return cast(int, response)
+
def zunion(
    self,
    keys: List[TEncodable],
) -> List[bytes]:
    """
    Computes the union of the sorted sets at the given `keys` and returns the union's
    members.
    See `zunion_withscores` to also retrieve scores, and `zunionstore` to persist the
    result as a sorted set.

    When in cluster mode, all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zunion/ for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.

    Returns:
        List[bytes]: The members present in any of the input sorted sets.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zunion(["key1", "key2"])
            [b'member1', b'member2']
    """
    # ZUNION takes the key count followed by the keys themselves.
    command_args: List[TEncodable] = [str(len(keys)), *keys]
    response = self._execute_command(RequestType.ZUnion, command_args)
    return cast(List[bytes], response)
+
def zunion_withscores(
    self,
    keys: Union[List[TEncodable], List[Tuple[TEncodable, float]]],
    aggregation_type: Optional[AggregationType] = None,
) -> Mapping[bytes, float]:
    """
    Computes the union of the sorted sets at the given `keys` and returns the union's
    members together with their aggregated scores.
    See `zunion` for members only, and `zunionstore` to persist the result.

    When in cluster mode, all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zunion/ for more details.

    Args:
        keys (Union[List[TEncodable], List[Tuple[TEncodable, float]]]): The sorted-set keys, either:
            List[TEncodable] - plain keys.
            List[Tuple[TEncodable, float]] - keys paired with score-multiplying weights.
        aggregation_type (Optional[AggregationType]): How to combine the scores of
            members present in several sets. See `AggregationType`.

    Returns:
        Mapping[bytes, float]: The union's members mapped to their scores.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zunion_withscores(["key1", "key2"])
            {b'member1': 20, b'member2': 8.2}
        >>> client.zunion_withscores(["key1", "key2"], AggregationType.MAX)
            {b'member1': 10.5, b'member2': 8.2}
    """
    command_args = _create_zinter_zunion_cmd_args(keys, aggregation_type)
    command_args.append("WITHSCORES")
    response = self._execute_command(RequestType.ZUnion, command_args)
    return cast(Mapping[bytes, float], response)
+
def zunionstore(
    self,
    destination: TEncodable,
    keys: Union[List[TEncodable], List[Tuple[TEncodable, float]]],
    aggregation_type: Optional[AggregationType] = None,
) -> int:
    """
    Computes the union of the sorted sets at the given `keys` and stores the result in
    `destination`, overwriting any existing value there (otherwise a new sorted set is
    created).
    See `zunion_withscores` to obtain the result directly.

    When in cluster mode, `destination` and all keys in `keys` must map to the same hash slot.

    See https://valkey.io/commands/zunionstore/ for more details.

    Args:
        destination (TEncodable): The key of the destination sorted set.
        keys (Union[List[TEncodable], List[Tuple[TEncodable, float]]]): The sorted-set keys, either:
            List[TEncodable] - plain keys.
            List[Tuple[TEncodable, float]] - keys paired with score-multiplying weights.
        aggregation_type (Optional[AggregationType]): How to combine the scores of
            members present in several sets. See `AggregationType`.

    Returns:
        int: The number of elements in the sorted set stored at `destination`.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2})
        >>> client.zadd("key2", {"member1": 9.5})
        >>> client.zunionstore("my_sorted_set", ["key1", "key2"])
            2  # "my_sorted_set" now holds two elements.
        >>> client.zrange_withscores("my_sorted_set", RangeByIndex(0, -1))
            {b'member1': 20, b'member2': 8.2}
        >>> client.zunionstore("my_sorted_set", ["key1", "key2"], AggregationType.MAX)
            2  # Two elements, each score being the maximum across the sets.
        >>> client.zrange_withscores("my_sorted_set", RangeByIndex(0, -1))
            {b'member1': 10.5, b'member2': 8.2}
    """
    command_args = _create_zinter_zunion_cmd_args(keys, aggregation_type, destination)
    response = self._execute_command(RequestType.ZUnionStore, command_args)
    return cast(int, response)
+
def zrandmember(self, key: TEncodable) -> Optional[bytes]:
    """
    Returns a random member from the sorted set stored at 'key'.

    See https://valkey.io/commands/zrandmember for more details.

    Args:
        key (TEncodable): The key of the sorted set.

    Returns:
        Optional[bytes]: A random member from the sorted set.
            If the sorted set does not exist or is empty, the response will be None.

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 1.0, "member2": 2.0})
        >>> client.zrandmember("my_sorted_set")
            b"member1"  # "member1" is a random member of "my_sorted_set".
        >>> client.zrandmember("non_existing_sorted_set")
            None  # "non_existing_sorted_set" is not an existing key, so None was returned.
    """
    # Pass the prepared args list (previously built but unused — [key] was sent directly).
    args: List[TEncodable] = [key]
    return cast(
        Optional[bytes],
        self._execute_command(RequestType.ZRandMember, args),
    )
+
def zrandmember_count(self, key: TEncodable, count: int) -> List[bytes]:
    """
    Retrieves up to the absolute value of `count` random members from the sorted set stored at 'key'.

    See https://valkey.io/commands/zrandmember for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        count (int): The number of members to return.
            If `count` is positive, returns unique members.
            If `count` is negative, allows for duplicates members.

    Returns:
        List[bytes]: A list of members from the sorted set.
            If the sorted set does not exist or is empty, the response will be an empty list.

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 1.0, "member2": 2.0})
        >>> client.zrandmember_count("my_sorted_set", -3)
            [b"member1", b"member1", b"member2"]  # "member1" and "member2" are random members of "my_sorted_set".
        >>> client.zrandmember_count("non_existing_sorted_set", 3)
            []  # "non_existing_sorted_set" is not an existing key, so an empty list was returned.
    """
    # The count is serialized as a string per the wire protocol.
    args: List[TEncodable] = [key, str(count)]
    return cast(
        List[bytes],
        self._execute_command(RequestType.ZRandMember, args),
    )
+
def zrandmember_withscores(
    self, key: TEncodable, count: int
) -> List[List[Union[bytes, float]]]:
    """
    Retrieves up to `abs(count)` random members of the sorted set at 'key', each paired
    with its score.

    See https://valkey.io/commands/zrandmember for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        count (int): How many members to return.
            A positive `count` yields distinct members; a negative one permits
            duplicates.

    Returns:
        List[List[Union[bytes, float]]]: `[member, score]` pairs, where `member` is a
            randomly chosen member and `score` its associated score.
            An empty list when the sorted set does not exist or is empty.

    Examples:
        >>> client.zadd("my_sorted_set", {"member1": 1.0, "member2": 2.0})
        >>> client.zrandmember_withscores("my_sorted_set", -3)
            [[b"member1", 1.0], [b"member1", 1.0], [b"member2", 2.0]]
        >>> client.zrandmember_withscores("non_existing_sorted_set", 3)
            []  # Key does not exist, so an empty list is returned.
    """
    command_args: List[TEncodable] = [key, str(count), "WITHSCORES"]
    response = self._execute_command(RequestType.ZRandMember, command_args)
    return cast(List[List[Union[bytes, float]]], response)
+
def zmpop(
    self,
    keys: List[TEncodable],
    filter: ScoreFilter,
    count: Optional[int] = None,
) -> Optional[List[Union[bytes, Mapping[bytes, float]]]]:
    """
    Pops member-score pairs from the first non-empty sorted set among `keys`, checking
    the keys in the order given.

    `count` (default 1) caps how many elements are popped; the actual number popped is
    the smaller of `count` and the set's cardinality.

    See https://valkey.io/commands/zmpop for more details.

    Note:
        When in cluster mode, all `keys` must map to the same hash slot.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.
        filter (ScoreFilter): Pop criteria — ScoreFilter.MIN or ScoreFilter.MAX to pop
            the lowest- or highest-scored members respectively.
        count (Optional[int]): How many elements to pop.

    Returns:
        Optional[List[Union[bytes, Mapping[bytes, float]]]]: A two-element list: the
            name of the key that was popped from, followed by a member→score mapping of
            the popped elements. None when nothing could be popped.

    Examples:
        >>> client.zadd("zSet1", {"one": 1.0, "two": 2.0, "three": 3.0})
        >>> client.zadd("zSet2", {"four": 4.0})
        >>> client.zmpop(["zSet1", "zSet2"], ScoreFilter.MAX, 2)
            [b'zSet1', {b'three': 3.0, b'two': 2.0}]  # Top two members popped from "zSet1".

    Since: Valkey version 7.0.0.
    """
    # Argument order: key count, keys, MIN/MAX token, then an optional COUNT clause.
    command_args: List[TEncodable] = [str(len(keys))]
    command_args.extend(keys)
    command_args.append(filter.value)
    if count is not None:
        command_args += ["COUNT", str(count)]

    response = self._execute_command(RequestType.ZMPop, command_args)
    return cast(Optional[List[Union[bytes, Mapping[bytes, float]]]], response)
+
def bzmpop(
    self,
    keys: List[TEncodable],
    modifier: ScoreFilter,
    timeout: float,
    count: Optional[int] = None,
) -> Optional[List[Union[bytes, Mapping[bytes, float]]]]:
    """
    Pops member-score pairs from the first non-empty sorted set among `keys`, checking
    the keys in the order given, and blocks the connection while every given set is
    empty.

    `count` (default 1) caps how many elements are popped; the actual number popped is
    the smaller of `count` and the set's cardinality.

    `BZMPOP` is the blocking variant of `ZMPOP`.

    See https://valkey.io/commands/bzmpop for more details.

    Notes:
        1. When in cluster mode, all `keys` must map to the same hash slot.
        2. `BZMPOP` is a client blocking command, see https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#blocking-commands for more details and best practices.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets.
        modifier (ScoreFilter): Pop criteria — ScoreFilter.MIN or ScoreFilter.MAX to pop
            the lowest- or highest-scored members respectively.
        timeout (float): Seconds to wait for the blocking operation; 0 blocks
            indefinitely.
        count (Optional[int]): How many elements to pop.

    Returns:
        Optional[List[Union[bytes, Mapping[bytes, float]]]]: A two-element list: the
            name of the key that was popped from, followed by a member→score mapping of
            the popped elements. None when nothing could be popped before the timeout
            expired.

    Examples:
        >>> client.zadd("zSet1", {"one": 1.0, "two": 2.0, "three": 3.0})
        >>> client.zadd("zSet2", {"four": 4.0})
        >>> client.bzmpop(["zSet1", "zSet2"], ScoreFilter.MAX, 0.5, 2)
            [b'zSet1', {b'three': 3.0, b'two': 2.0}]  # Top two members popped from "zSet1".

    Since: Valkey version 7.0.0.
    """
    # Argument order: timeout, key count, keys, MIN/MAX token, optional COUNT clause.
    command_args = [str(timeout), str(len(keys))]
    command_args.extend(keys)
    command_args.append(modifier.value)
    if count is not None:
        command_args.extend(["COUNT", str(count)])

    response = self._execute_command(RequestType.BZMPop, command_args)
    return cast(Optional[List[Union[bytes, Mapping[bytes, float]]]], response)
+
def zintercard(
    self, keys: List[TEncodable], limit: Optional[int] = None
) -> int:
    """
    Computes the cardinality of the intersection of the sorted sets given by `keys`. If the optional
    `limit` argument is supplied and the running intersection count reaches `limit` before the
    computation finishes, the algorithm stops early and reports `limit` as the result.

    See https://valkey.io/commands/zintercard for more details.

    Args:
        keys (List[TEncodable]): The keys of the sorted sets to intersect.
        limit (Optional[int]): An optional argument that can be used to specify a maximum number for the
            intersection cardinality. If limit is not supplied, or if it is set to 0, there will be no limit.

    Note:
        When in cluster mode, all `keys` must map to the same hash slot.

    Returns:
        int: The cardinality of the intersection of the given sorted sets, or the `limit` if reached.

    Examples:
        >>> client.zadd("key1", {"member1": 10.5, "member2": 8.2, "member3": 9.6})
        >>> client.zadd("key2", {"member1": 10.5, "member2": 3.5})
        >>> client.zintercard(["key1", "key2"])
        2 # Indicates that the intersection of the sorted sets at "key1" and "key2" has a cardinality of 2.
        >>> client.zintercard(["key1", "key2"], 1)
        1 # A `limit` of 1 was provided, so the intersection computation exits early and yields the `limit` value of 1.

    Since: Valkey version 7.0.0.
    """
    # Wire format: numkeys, keys..., optionally "LIMIT n".
    args: List[TEncodable] = [str(len(keys)), *keys]
    if limit is not None:
        args += ["LIMIT", str(limit)]

    return cast(int, self._execute_command(RequestType.ZInterCard, args))
+
def script_show(self, sha1: TEncodable) -> bytes:
    """
    Fetches the original source code of a cached server-side script by its SHA1 digest.

    See https://valkey.io/commands/script-show for more details.

    Args:
        sha1 (TEncodable): The SHA1 digest of the script.

    Returns:
        bytes: The original source code of the script, if present in the cache.
            If the script is not found in the cache, an error is thrown.

    Example:
        >>> client.script_show(script.get_hash())
        b"return { KEYS[1], ARGV[1] }"

    Since: Valkey version 8.0.0.
    """
    args: List[TEncodable] = [sha1]
    return cast(bytes, self._execute_command(RequestType.ScriptShow, args))
+
def pfadd(self, key: TEncodable, elements: List[TEncodable]) -> int:
    """
    Adds the given elements to the HyperLogLog stored at `key`, creating the structure when the
    `key` is absent. Passing an empty `elements` list against an existing HyperLogLog is a no-op.

    See https://valkey.io/commands/pfadd/ for more details.

    Args:
        key (TEncodable): The key of the HyperLogLog data structure to add elements into.
        elements (List[TEncodable]): A list of members to add to the HyperLogLog stored at `key`.

    Returns:
        int: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
            altered, then returns 1. Otherwise, returns 0.

    Examples:
        >>> client.pfadd("hll_1", ["a", "b", "c" ])
        1 # A data structure was created or modified
        >>> client.pfadd("hll_2", [])
        1 # A new empty data structure was created
    """
    args: List[TEncodable] = [key, *elements]
    return cast(int, self._execute_command(RequestType.PfAdd, args))
+
def pfcount(self, keys: List[TEncodable]) -> int:
    """
    Estimates the cardinality of a single HyperLogLog, or of the union of several HyperLogLogs
    by merging them on the fly (the stored structures are not modified).

    See https://valkey.io/commands/pfcount for more details.

    Note:
        When in Cluster mode, all `keys` must map to the same hash slot.

    Args:
        keys (List[TEncodable]): The keys of the HyperLogLog data structures to be analyzed.

    Returns:
        int: The approximated cardinality of given HyperLogLog data structures.
            The cardinality of a key that does not exist is 0.

    Examples:
        >>> client.pfcount(["hll_1", "hll_2"])
        4 # The approximated cardinality of the union of "hll_1" and "hll_2" is 4.
    """
    response = self._execute_command(RequestType.PfCount, keys)
    return cast(int, response)
+
def pfmerge(
    self, destination: TEncodable, source_keys: List[TEncodable]
) -> TOK:
    """
    Merges several HyperLogLog structures into a single one. When `destination` already exists it is
    treated as one of the sources; otherwise a fresh HyperLogLog is created there.

    See https://valkey.io/commands/pfmerge for more details.

    Note:
        When in Cluster mode, all keys in `source_keys` and `destination` must map to the same hash slot.

    Args:
        destination (TEncodable): The key of the destination HyperLogLog where the merged data sets will be stored.
        source_keys (List[TEncodable]): The keys of the HyperLogLog structures to be merged.

    Returns:
        OK: A simple OK response.

    Examples:
        >>> client.pfadd("hll1", ["a", "b"])
        >>> client.pfadd("hll2", ["b", "c"])
        >>> client.pfmerge("new_hll", ["hll1", "hll2"])
        OK # The value of "hll1" merged with "hll2" was stored in "new_hll".
        >>> client.pfcount(["new_hll"])
        3 # The approximated cardinality of "new_hll" is 3.
    """
    args: List[TEncodable] = [destination, *source_keys]
    return cast(TOK, self._execute_command(RequestType.PfMerge, args))
+
def bitcount(
    self, key: TEncodable, options: Optional[OffsetOptions] = None
) -> int:
    """
    Counts set bits (population count) in the string stored at `key`. Supplying `options`
    restricts the count to a specific interval of the string.

    See https://valkey.io/commands/bitcount for more details.

    Args:
        key (TEncodable): The key for the string to count the set bits of.
        options (Optional[OffsetOptions]): The offset options.

    Returns:
        int: If `options` is provided, returns the number of set bits in the string interval specified by `options`.
            If `options` is not provided, returns the number of set bits in the string stored at `key`.
            Otherwise, if `key` is missing, returns `0` as it is treated as an empty string.

    Examples:
        >>> client.bitcount("my_key1")
        2 # The string stored at "my_key1" contains 2 set bits.
        >>> client.bitcount("my_key2", OffsetOptions(1))
        8 # From the second to last bytes of the string stored at "my_key2" there are 8 set bits.
        >>> client.bitcount("my_key2", OffsetOptions(1, 3))
        2 # The second to fourth bytes of the string stored at "my_key2" contain 2 set bits.
        >>> client.bitcount("my_key3", OffsetOptions(1, 1, BitmapIndexType.BIT))
        1 # Indicates that the second bit of the string stored at "my_key3" is set.
        >>> client.bitcount("my_key3", OffsetOptions(-1, -1, BitmapIndexType.BIT))
        1 # Indicates that the last bit of the string stored at "my_key3" is set.
    """
    # The interval arguments are only appended when the caller supplied options.
    args: List[TEncodable] = (
        [key] if options is None else [key, *options.to_args()]
    )
    return cast(int, self._execute_command(RequestType.BitCount, args))
+
def setbit(self, key: TEncodable, offset: int, value: int) -> int:
    """
    Sets or clears the bit at `offset` in the string value stored at `key`. The `offset` is a zero-based index,
    with `0` being the first element of the list, `1` being the next element, and so on. The `offset` must be less
    than `2^32` and greater than or equal to `0`. If a key is non-existent then the bit at `offset` is set to
    `value` and the preceding bits are set to `0`.

    See https://valkey.io/commands/setbit for more details.

    Args:
        key (TEncodable): The key of the string.
        offset (int): The index of the bit to be set.
        value (int): The bit value to set at `offset`. The value must be `0` or `1`.

    Returns:
        int: The bit value that was previously stored at `offset`.

    Examples:
        >>> client.setbit("string_key", 1, 1)
        0 # The second bit value was 0 before setting to 1.
    """
    args: List[TEncodable] = [key, str(offset), str(value)]
    return cast(int, self._execute_command(RequestType.SetBit, args))
+
def getbit(self, key: TEncodable, offset: int) -> int:
    """
    Reads the bit value at `offset` in the string value stored at `key`.
    `offset` should be greater than or equal to zero.

    See https://valkey.io/commands/getbit for more details.

    Args:
        key (TEncodable): The key of the string.
        offset (int): The index of the bit to return.

    Returns:
        int: The bit at the given `offset` of the string. Returns `0` if the key is empty or if the `offset` exceeds
            the length of the string.

    Examples:
        >>> client.getbit("my_key", 1)
        1 # Indicates that the second bit of the string stored at "my_key" is set to 1.
    """
    args: List[TEncodable] = [key, str(offset)]
    return cast(int, self._execute_command(RequestType.GetBit, args))
+
def bitpos(
    self, key: TEncodable, bit: int, options: Optional[OffsetOptions] = None
) -> int:
    """
    Returns the position of the first bit matching the given `bit` value. The optional starting offset
    `start` is a zero-based index, with `0` being the first byte of the list, `1` being the next byte and so on.
    The offset can also be a negative number indicating an offset starting at the end of the list, with `-1` being
    the last byte of the list, `-2` being the penultimate, and so on.

    If you are using Valkey 7.0.0 or above, the optional `index_type` can also be provided to specify whether the
    `start` and `end` offsets specify BIT or BYTE offsets. If `index_type` is not provided, BYTE offsets
    are assumed. If BIT is specified, `start=0` and `end=2` means to look at the first three bits. If BYTE is
    specified, `start=0` and `end=2` means to look at the first three bytes.

    See https://valkey.io/commands/bitpos for more details.

    Args:
        key (TEncodable): The key of the string.
        bit (int): The bit value to match. Must be `0` or `1`.
        options (Optional[OffsetOptions]): The offset options.

    Returns:
        int: The position of the first occurrence of `bit` in the binary value of the string held at `key`.
            If `start` was provided, the search begins at the offset indicated by `start`.

    Examples:
        >>> client.set("key1", "A1") # "A1" has binary value 01000001 00110001
        >>> client.bitpos("key1", 1)
        1 # The first occurrence of bit value 1 in the string stored at "key1" is at the second position.
        >>> client.bitpos("key1", 1, OffsetOptions(-1))
        10 # The first occurrence of bit value 1, starting at the last byte in the string stored at "key1", is at the eleventh position.

        >>> client.set("key2", "A12") # "A12" has binary value 01000001 00110001 00110010
        >>> client.bitpos("key2", 1, OffsetOptions(1, -1))
        10 # The first occurrence of bit value 1 in the second byte to the last byte of the string stored at "key1" is at the eleventh position.
        >>> client.bitpos("key2", 1, OffsetOptions(2, 9, BitmapIndexType.BIT))
        7 # The first occurrence of bit value 1 in the third to tenth bits of the string stored at "key1" is at the eighth position.
    """
    args: List[TEncodable] = [key, str(bit)]
    if options is not None:
        args += options.to_args()

    return cast(int, self._execute_command(RequestType.BitPos, args))
+
def bitop(
    self,
    operation: BitwiseOperation,
    destination: TEncodable,
    keys: List[TEncodable],
) -> int:
    """
    Perform a bitwise operation between multiple keys (containing string values) and store the result in the
    `destination`.

    See https://valkey.io/commands/bitop for more details.

    Note:
        When in cluster mode, `destination` and all `keys` must map to the same hash slot.

    Args:
        operation (BitwiseOperation): The bitwise operation to perform.
        destination (TEncodable): The key that will store the resulting string.
        keys (List[TEncodable]): The list of keys to perform the bitwise operation on.

    Returns:
        int: The size of the string stored in `destination`.

    Examples:
        >>> client.set("key1", "A") # "A" has binary value 01000001
        >>> client.set("key2", "B") # "B" has binary value 01000010
        >>> client.bitop(BitwiseOperation.AND, "destination", ["key1", "key2"])
        1 # The size of the resulting string stored in "destination" is 1
        >>> client.get("destination")
        "@" # "@" has binary value 01000000
    """
    # Wire format: operation, destination key, then all source keys.
    args: List[TEncodable] = [operation.value, destination] + keys
    return cast(int, self._execute_command(RequestType.BitOp, args))
+
def bitfield(
    self, key: TEncodable, subcommands: List[BitFieldSubCommands]
) -> List[Optional[int]]:
    """
    Reads or modifies the array of bits representing the string that is held at `key` based on the specified
    `subcommands`.

    See https://valkey.io/commands/bitfield for more details.

    Args:
        key (TEncodable): The key of the string.
        subcommands (List[BitFieldSubCommands]): The subcommands to be performed on the binary value of the string
            at `key`, which could be any of the following:
            - `BitFieldGet`
            - `BitFieldSet`
            - `BitFieldIncrBy`
            - `BitFieldOverflow`

    Returns:
        List[Optional[int]]: An array of results from the executed subcommands:
            - `BitFieldGet` returns the value in `BitOffset` or `BitOffsetMultiplier`.
            - `BitFieldSet` returns the old value in `BitOffset` or `BitOffsetMultiplier`.
            - `BitFieldIncrBy` returns the new value in `BitOffset` or `BitOffsetMultiplier`.
            - `BitFieldOverflow` determines the behavior of the "SET" and "INCRBY" subcommands when an overflow or
              underflow occurs. "OVERFLOW" does not return a value and does not contribute a value to the list
              response.

    Examples:
        >>> client.set("my_key", "A") # "A" has binary value 01000001
        >>> client.bitfield("my_key", [BitFieldSet(UnsignedEncoding(2), BitOffset(1), 3), BitFieldGet(UnsignedEncoding(2), BitOffset(1))])
        [2, 3] # The old value at offset 1 with an unsigned encoding of 2 was 2. The new value at offset 1 with an unsigned encoding of 2 is 3.
    """
    # Serialize the subcommand objects into their wire-protocol tokens.
    return cast(
        List[Optional[int]],
        self._execute_command(
            RequestType.BitField, [key] + _create_bitfield_args(subcommands)
        ),
    )
+
def bitfield_read_only(
    self, key: TEncodable, subcommands: List[BitFieldGet]
) -> List[int]:
    """
    Reads the array of bits representing the string that is held at `key` based on the specified `subcommands`.

    See https://valkey.io/commands/bitfield_ro for more details.

    Args:
        key (TEncodable): The key of the string.
        subcommands (List[BitFieldGet]): The "GET" subcommands to be performed.

    Returns:
        List[int]: An array of results from the "GET" subcommands.

    Examples:
        >>> client.set("my_key", "A") # "A" has binary value 01000001
        >>> client.bitfield_read_only("my_key", [BitFieldGet(UnsignedEncoding(2), Offset(1))])
        [2] # The value at offset 1 with an unsigned encoding of 2 is 2.

    Since: Valkey version 6.0.0.
    """
    # Only read-only "GET" subcommands are accepted; serialize them to wire tokens.
    args: List[TEncodable] = [key, *_create_bitfield_read_only_args(subcommands)]
    return cast(List[int], self._execute_command(RequestType.BitFieldReadOnly, args))
+
def object_encoding(self, key: TEncodable) -> Optional[bytes]:
    """
    Reports the internal encoding used for the Valkey object stored at `key`.

    See https://valkey.io/commands/object-encoding for more details.

    Args:
        key (TEncodable): The `key` of the object to get the internal encoding of.

    Returns:
        Optional[bytes]: If `key` exists, returns the internal encoding of the object stored at
            `key` as a bytes string. Otherwise, returns None.

    Examples:
        >>> client.object_encoding("my_hash")
        b"listpack" # The hash stored at "my_hash" has an internal encoding of "listpack".
    """
    response = self._execute_command(RequestType.ObjectEncoding, [key])
    return cast(Optional[bytes], response)
+
def object_freq(self, key: TEncodable) -> Optional[int]:
    """
    Reports the logarithmic access frequency counter of the Valkey object stored at `key`.

    See https://valkey.io/commands/object-freq for more details.

    Args:
        key (TEncodable): The key of the object to get the logarithmic access frequency counter of.

    Returns:
        Optional[int]: If `key` exists, returns the logarithmic access frequency counter of the object stored at `key` as an
            integer. Otherwise, returns None.

    Examples:
        >>> client.object_freq("my_hash")
        2 # The logarithmic access frequency counter of "my_hash" has a value of 2.
    """
    response = self._execute_command(RequestType.ObjectFreq, [key])
    return cast(Optional[int], response)
+
def object_idletime(self, key: TEncodable) -> Optional[int]:
    """
    Reports how many seconds have elapsed since the value stored at `key` was last accessed.

    See https://valkey.io/commands/object-idletime for more details.

    Args:
        key (TEncodable): The key of the object to get the idle time of.

    Returns:
        Optional[int]: If `key` exists, returns the idle time in seconds. Otherwise, returns None.

    Examples:
        >>> client.object_idletime("my_hash")
        13 # "my_hash" was last accessed 13 seconds ago.
    """
    response = self._execute_command(RequestType.ObjectIdleTime, [key])
    return cast(Optional[int], response)
+
def object_refcount(self, key: TEncodable) -> Optional[int]:
    """
    Reports the reference count of the object stored at `key`.

    See https://valkey.io/commands/object-refcount for more details.

    Args:
        key (TEncodable): The key of the object to get the reference count of.

    Returns:
        Optional[int]: If `key` exists, returns the reference count of the object stored at `key` as an integer.
            Otherwise, returns None.

    Examples:
        >>> client.object_refcount("my_hash")
        2 # "my_hash" has a reference count of 2.
    """
    response = self._execute_command(RequestType.ObjectRefCount, [key])
    return cast(Optional[int], response)
+
def srandmember(self, key: TEncodable) -> Optional[bytes]:
    """
    Returns a random element from the set value stored at 'key'.

    See https://valkey.io/commands/srandmember for more details.

    Args:
        key (TEncodable): The key from which to retrieve the set member.

    Returns:
        Optional[bytes]: A random element from the set, or None if 'key' does not exist.

    Examples:
        >>> client.sadd("my_set", ["member1", "member2"])
        >>> client.srandmember(b"my_set")
        b"member1" # "member1" is a random member of "my_set".
        >>> client.srandmember("non_existing_set")
        None # "non_existing_set" is not an existing key, so None was returned.
    """
    args: List[TEncodable] = [key]
    return cast(
        Optional[bytes],
        self._execute_command(RequestType.SRandMember, args),
    )
+
def srandmember_count(self, key: TEncodable, count: int) -> List[bytes]:
    """
    Returns one or more random elements from the set value stored at 'key'.

    See https://valkey.io/commands/srandmember for more details.

    Args:
        key (TEncodable): The key of the set.
        count (int): The number of members to return.
            If `count` is positive, returns unique members.
            If `count` is negative, allows for duplicate members.

    Returns:
        List[bytes]: A list of members from the set.
            If the set does not exist or is empty, the response will be an empty list.

    Examples:
        >>> client.sadd("my_set", ["member1", "member2"])
        >>> client.srandmember("my_set", -3)
        [b"member1", b"member1", b"member2"] # "member1" and "member2" are random members of "my_set".
        >>> client.srandmember("non_existing_set", 3)
        [] # "non_existing_set" is not an existing key, so an empty list was returned.
    """
    return cast(
        List[bytes],
        self._execute_command(RequestType.SRandMember, [key, str(count)]),
    )
+
def getex(
    self,
    key: TEncodable,
    expiry: Optional[ExpiryGetEx] = None,
) -> Optional[bytes]:
    """
    Get the value of `key` and optionally set its expiration. `GETEX` is similar to `GET`.
    See https://valkey.io/commands/getex for more details.

    Args:
        key (TEncodable): The key to get.
        expiry (Optional[ExpiryGetEx], optional): Set expiration to the given key.
            Equivalent to [`EX` | `PX` | `EXAT` | `PXAT` | `PERSIST`] in the Valkey API.

    Returns:
        Optional[bytes]:
            If `key` exists, return the value stored at `key`
            If `key` does not exist, return `None`

    Examples:
        >>> client.set("key", "value")
        'OK'
        >>> client.getex("key")
        b'value'
        >>> client.getex("key", ExpiryGetEx(ExpiryTypeGetEx.SEC, 1))
        b'value'
        >>> time.sleep(1)
        >>> client.getex(b"key")
        None

    Since: Valkey version 6.2.0.
    """
    args: List[TEncodable] = [key]
    if expiry is not None:
        # The expiry object renders its own wire tokens (e.g. ["EX", "1"]).
        args.extend(expiry.get_cmd_args())
    return cast(
        Optional[bytes],
        self._execute_command(RequestType.GetEx, args),
    )
+
def dump(
    self,
    key: TEncodable,
) -> Optional[bytes]:
    """
    Serializes the value stored at `key` into a Valkey-specific binary format and returns it.
    The payload can later be loaded back with `restore`.

    See https://valkey.io/commands/dump for more details.

    Args:
        key (TEncodable): The `key` to serialize.

    Returns:
        Optional[bytes]: The serialized value of the data stored at `key`.
            If `key` does not exist, `None` will be returned.

    Examples:
        >>> client.dump("key")
        b"value" # The serialized value stored at `key`.
        >>> client.dump("nonExistingKey")
        None # Non-existing key will return `None`.
    """
    args: List[TEncodable] = [key]
    return cast(Optional[bytes], self._execute_command(RequestType.Dump, args))
+
def restore(
    self,
    key: TEncodable,
    ttl: int,
    value: TEncodable,
    replace: bool = False,
    absttl: bool = False,
    idletime: Optional[int] = None,
    frequency: Optional[int] = None,
) -> TOK:
    """
    Create a `key` associated with a `value` that is obtained by deserializing the provided
    serialized `value` obtained via `dump`.

    See https://valkey.io/commands/restore for more details.

    Note: `IDLETIME` and `FREQ` modifiers cannot be set at the same time.

    Args:
        key (TEncodable): The `key` to create.
        ttl (int): The expiry time (in milliseconds). If `0`, the `key` will persist.
        value (TEncodable): The serialized value to deserialize and assign to `key`.
        replace (bool): Set to `True` to replace the key if it exists.
        absttl (bool): Set to `True` to specify that `ttl` represents an absolute Unix
            timestamp (in milliseconds).
        idletime (Optional[int]): Set the `IDLETIME` option with object idletime to the given key.
        frequency (Optional[int]): Set the `FREQ` option with object frequency to the given key.

    Returns:
        OK: If the `key` was successfully restored with a `value`.

    Examples:
        >>> client.restore("newKey", 0, value)
        OK # Indicates restore `newKey` without any ttl expiry nor any option
        >>> client.restore("newKey", 0, value, replace=True)
        OK # Indicates restore `newKey` with `REPLACE` option
        >>> client.restore("newKey", 0, value, absttl=True)
        OK # Indicates restore `newKey` with `ABSTTL` option
        >>> client.restore("newKey", 0, value, idletime=10)
        OK # Indicates restore `newKey` with `IDLETIME` option
        >>> client.restore("newKey", 0, value, frequency=5)
        OK # Indicates restore `newKey` with `FREQ` option
    """
    args: List[TEncodable] = [key, str(ttl), value]
    if replace:
        args.append("REPLACE")
    if absttl:
        args.append("ABSTTL")
    if idletime is not None:
        args.extend(["IDLETIME", str(idletime)])
    if frequency is not None:
        args.extend(["FREQ", str(frequency)])
    return cast(
        TOK,
        self._execute_command(RequestType.Restore, args),
    )
+
def sscan(
    self,
    key: TEncodable,
    cursor: TEncodable,
    match: Optional[TEncodable] = None,
    count: Optional[int] = None,
) -> List[Union[bytes, List[bytes]]]:
    """
    Iterates incrementally over a set.

    See https://valkey.io/commands/sscan for more details.

    Args:
        key (TEncodable): The key of the set.
        cursor (TEncodable): The cursor that points to the next iteration of results. A value of "0" indicates the start of
            the search.
        match (Optional[TEncodable]): The match filter is applied to the result of the command and will only include
            strings or byte strings that match the pattern specified. If the set is large enough for scan commands to return only a
            subset of the set then there could be a case where the result is empty although there are items that
            match the pattern specified. This is due to the default `COUNT` being `10` which indicates that it will
            only fetch and match `10` items from the list.
        count (Optional[int]): `COUNT` is a just a hint for the command for how many elements to fetch from the set.
            `COUNT` could be ignored until the set is large enough for the `SCAN` commands to represent the results
            as compact single-allocation packed encoding.

    Returns:
        List[Union[bytes, List[bytes]]]: An `Array` of the `cursor` and the subset of the set held by `key`.
            The first element is always the `cursor` for the next iteration of results. `0` will be the `cursor`
            returned on the last iteration of the set. The second element is always an `Array` of the subset of the
            set held in `key`.

    Examples:
        # Assume "key" contains a set with 130 members
        >>> result_cursor = "0"
        >>> while True:
        ...     result = client.sscan("key", result_cursor, match="*")
        ...     new_cursor = str(result[0])
        ...     print("Cursor: ", new_cursor)
        ...     print("Members: ", result[1])
        ...     if new_cursor == "0":
        ...         break
        ...     result_cursor = new_cursor
        Cursor: 48
        Members: [b'3', b'118', b'120', b'86', b'76', b'13', b'61', b'111', b'55', b'45']
        Cursor: 24
        Members: [b'38', b'109', b'11', b'119', b'34', b'24', b'40', b'57', b'20', b'17']
        Cursor: 0
        Members: [b'47', b'122', b'1', b'53', b'10', b'14', b'80']
    """
    args: List[TEncodable] = [key, cursor]
    if match is not None:
        args += ["MATCH", match]
    if count is not None:
        args += ["COUNT", str(count)]

    return cast(
        List[Union[bytes, List[bytes]]],
        self._execute_command(RequestType.SScan, args),
    )
+
def zscan(
    self,
    key: TEncodable,
    cursor: TEncodable,
    match: Optional[TEncodable] = None,
    count: Optional[int] = None,
    no_scores: bool = False,
) -> List[Union[bytes, List[bytes]]]:
    """
    Iterates incrementally over a sorted set.

    See https://valkey.io/commands/zscan for more details.

    Args:
        key (TEncodable): The key of the sorted set.
        cursor (TEncodable): The cursor that points to the next iteration of results. A value of "0" indicates the start of
            the search.
        match (Optional[TEncodable]): The match filter is applied to the result of the command and will only include
            strings or byte strings that match the pattern specified. If the sorted set is large enough for scan commands to return
            only a subset of the sorted set then there could be a case where the result is empty although there are
            items that match the pattern specified. This is due to the default `COUNT` being `10` which indicates
            that it will only fetch and match `10` items from the list.
        count (Optional[int]): `COUNT` is a just a hint for the command for how many elements to fetch from the
            sorted set. `COUNT` could be ignored until the sorted set is large enough for the `SCAN` commands to
            represent the results as compact single-allocation packed encoding.
        no_scores (bool): If `True`, the command will not return scores associated with the members. Since Valkey "8.0.0".

    Returns:
        List[Union[bytes, List[bytes]]]: An `Array` of the `cursor` and the subset of the sorted set held by `key`.
            The first element is always the `cursor` for the next iteration of results. `0` will be the `cursor`
            returned on the last iteration of the sorted set. The second element is always an `Array` of the subset
            of the sorted set held in `key`. The `Array` in the second element is a flattened series of
            `String` pairs, where the member is at even indices and its score is at odd indices.
            If `no_scores` is set to `True`, the second element will only contain the members without scores.

    Examples:
        # Assume "key" contains a sorted set with multiple members
        >>> result_cursor = "0"
        >>> while True:
        ...     result = client.zscan("key", result_cursor, match="*", count=5)
        ...     new_cursor = str(result[0])
        ...     print("Cursor: ", new_cursor)
        ...     print("Members: ", result[1])
        ...     if new_cursor == "0":
        ...         break
        ...     result_cursor = new_cursor
        Cursor: 123
        Members: [b'value 163', b'163', b'value 114', b'114', b'value 25', b'25', b'value 82', b'82', b'value 64', b'64']
        Cursor: 47
        Members: [b'value 39', b'39', b'value 127', b'127', b'value 43', b'43', b'value 139', b'139', b'value 211', b'211']
        Cursor: 0
        Members: [b'value 55', b'55', b'value 24', b'24', b'value 90', b'90', b'value 113', b'113']

        # Using no-score
        >>> result_cursor = "0"
        >>> while True:
        ...     result = client.zscan("key", result_cursor, match="*", count=5, no_scores=True)
        ...     new_cursor = str(result[0])
        ...     print("Cursor: ", new_cursor)
        ...     print("Members: ", result[1])
        ...     if new_cursor == "0":
        ...         break
        ...     result_cursor = new_cursor
        Cursor: 123
        Members: [b'value 163', b'value 114', b'value 25', b'value 82', b'value 64']
        Cursor: 47
        Members: [b'value 39', b'value 127', b'value 43', b'value 139', b'value 211']
        Cursor: 0
        Members: [b'value 55', b'value 24', b'value 90', b'value 113']
    """
    args: List[TEncodable] = [key, cursor]
    if match is not None:
        args += ["MATCH", match]
    if count is not None:
        args += ["COUNT", str(count)]
    if no_scores:
        args.append("NOSCORES")

    return cast(
        List[Union[bytes, List[bytes]]],
        self._execute_command(RequestType.ZScan, args),
    )
+
+ def hscan(
+ self,
+ key: TEncodable,
+ cursor: TEncodable,
+ match: Optional[TEncodable] = None,
+ count: Optional[int] = None,
+ no_values: bool = False,
+ ) -> List[Union[bytes, List[bytes]]]:
+ """
+ Iterates incrementally over a hash.
+
+ See https://valkey.io/commands/hscan for more details.
+
+ Args:
+ key (TEncodable): The key of the set.
+ cursor (TEncodable): The cursor that points to the next iteration of results. A value of "0" indicates the start of
+ the search.
+ match (Optional[TEncodable]): The match filter is applied to the result of the command and will only include
+ strings or byte strings that match the pattern specified. If the hash is large enough for scan commands to return only a
+ subset of the hash then there could be a case where the result is empty although there are items that
+ match the pattern specified. This is due to the default `COUNT` being `10` which indicates that it will
+ only fetch and match `10` items from the list.
+ count (Optional[int]): `COUNT` is a just a hint for the command for how many elements to fetch from the hash.
+ `COUNT` could be ignored until the hash is large enough for the `SCAN` commands to represent the results
+ as compact single-allocation packed encoding.
+ no_values (bool): If `True`, the command will not return values the fields in the hash. Since Valkey "8.0.0".
+
+ Returns:
+ List[Union[bytes, List[bytes]]]: An `Array` of the `cursor` and the subset of the hash held by `key`.
+ The first element is always the `cursor` for the next iteration of results. `0` will be the `cursor`
+ returned on the last iteration of the hash. The second element is always an `Array` of the subset of the
+ hash held in `key`. The `Array` in the second element is a flattened series of `String` pairs,
+ where the field name is at even indices and its value is at the following odd index.
+ If `no_values` is set to `True`, the second element will only contain the fields without the values.
+
+ Examples:
+ # Assume "key" contains a hash with multiple members
+ >>> result_cursor = "0"
+ >>> while True:
+ ... result = client.hscan("key", "0", match="*", count=3)
+ ... new_cursor = str(result[0])
+ ... print("Cursor: ", new_cursor)
+ ... print("Members: ", result[1])
+ ... if new_cursor == "0":
+ ... break
+ ... result_cursor = new_cursor
+ Cursor: 1
+ Members: [b'field 79', b'value 79', b'field 20', b'value 20', b'field 115', b'value 115']
+ Cursor: 39
+ Members: [b'field 63', b'value 63', b'field 293', b'value 293', b'field 162', b'value 162']
+ Cursor: 0
+ Members: [b'field 420', b'value 420', b'field 221', b'value 221']
+
+ # Use no-values
+ >>> result_cursor = "0"
+ >>> while True:
+ ... result = client.hscan("key", "0", match="*", count=3, no_values=True)
+ ... new_cursor = str(result[0])
+ ... print("Cursor: ", new_cursor)
+ ... print("Members: ", result[1])
+ ... if new_cursor == "0":
+ ... break
+ ... result_cursor = new_cursor
+ Cursor: 1
+ Members: [b'field 79', b'field 20', b'field 115']
+ Cursor: 39
+ Members: [b'field 63', b'field 293', b'field 162']
+ Cursor: 0
+ Members: [b'field 420', b'field 221']
+ """
+ args: List[TEncodable] = [key, cursor]
+ if match is not None:
+ args += ["MATCH", match]
+ if count is not None:
+ args += ["COUNT", str(count)]
+ if no_values:
+ args.append("NOVALUES")
+
+ return cast(
+ List[Union[bytes, List[bytes]]],
+ self._execute_command(RequestType.HScan, args),
+ )
+
+ def fcall(
+ self,
+ function: TEncodable,
+ keys: Optional[List[TEncodable]] = None,
+ arguments: Optional[List[TEncodable]] = None,
+ ) -> TResult:
+ """
+ Invokes a previously loaded function.
+ See https://valkey.io/commands/fcall/ for more details.
+ When in cluster mode, all keys in `keys` must map to the same hash slot.
+ Args:
+ function (TEncodable): The function name.
+ keys (Optional[List[TEncodable]]): A list of keys accessed by the function. To ensure the correct
+ execution of functions, both in standalone and clustered deployments, all names of keys
+ that a function accesses must be explicitly provided as `keys`.
+ arguments (Optional[List[TEncodable]]): A list of `function` arguments. `Arguments`
+ should not represent names of keys.
+ Returns:
+ TResult:
+ The invoked function's return value.
+ Example:
+ >>> client.fcall("Deep_Thought")
+ b'new_value' # Returns the function's return value.
+
+ Since: Valkey version 7.0.0.
+ """
+ args: List[TEncodable] = []
+ if keys is not None:
+ args.extend([function, str(len(keys))] + keys)
+ else:
+ args.extend([function, str(0)])
+ if arguments is not None:
+ args.extend(arguments)
+ return cast(
+ TResult,
+ self._execute_command(RequestType.FCall, args),
+ )
+
+ def fcall_ro(
+ self,
+ function: TEncodable,
+ keys: Optional[List[TEncodable]] = None,
+ arguments: Optional[List[TEncodable]] = None,
+ ) -> TResult:
+ """
+ Invokes a previously loaded read-only function.
+
+ See https://valkey.io/commands/fcall_ro for more details.
+
+ When in cluster mode, all keys in `keys` must map to the same hash slot.
+
+ Args:
+ function (TEncodable): The function name.
+ keys (Optional[List[TEncodable]]): An `array` of keys accessed by the function. To ensure the correct
+ execution of functions, all names of keys that a function accesses must be
+ explicitly provided as `keys`.
+ arguments (Optional[List[TEncodable]]): An `array` of `function` arguments. `arguments` should not
+ represent names of keys.
+
+ Returns:
+ TResult: The return value depends on the function that was executed.
+
+ Examples:
+ >>> client.fcall_ro("Deep_Thought", ["key1"], ["Answer", "to", "the",
+ "Ultimate", "Question", "of", "Life,", "the", "Universe,", "and", "Everything"])
+ 42 # The return value on the function that was executed
+
+ Since: Valkey version 7.0.0.
+ """
+ args: List[TEncodable] = []
+ if keys is not None:
+ args.extend([function, str(len(keys))] + keys)
+ else:
+ args.extend([function, str(0)])
+ if arguments is not None:
+ args.extend(arguments)
+ return cast(
+ TResult,
+ self._execute_command(RequestType.FCallReadOnly, args),
+ )
+
+ def watch(self, keys: List[TEncodable]) -> TOK:
+ """
+ Marks the given keys to be watched for conditional execution of a transaction. Transactions
+ will only execute commands if the watched keys are not modified before execution of the
+ transaction.
+
+ See https://valkey.io/commands/watch for more details.
+
+ Note:
+ In cluster mode, if keys in `keys` map to different hash slots,
+ the command will be split across these slots and executed separately for each.
+ This means the command is atomic only at the slot level. If one or more slot-specific
+ requests fail, the entire call will return the first encountered error, even
+ though some requests may have succeeded while others did not.
+ If this behavior impacts your application logic, consider splitting the
+ request into sub-requests per slot to ensure atomicity.
+
+ Args:
+ keys (List[TEncodable]): The keys to watch.
+
+ Returns:
+ TOK: A simple "OK" response.
+
+ Examples:
+ >>> client.watch("sampleKey")
+ 'OK'
+ >>> transaction.set("sampleKey", "foobar")
+ >>> client.exec(transaction)
+ 'OK' # Executes successfully and keys are unwatched.
+
+ >>> client.watch("sampleKey")
+ 'OK'
+ >>> transaction.set("sampleKey", "foobar")
+ >>> client.set("sampleKey", "hello world")
+ 'OK'
+ >>> client.exec(transaction)
+ None # None is returned when the watched key is modified before transaction execution.
+ """
+
+ return cast(
+ TOK,
+ self._execute_command(RequestType.Watch, keys),
+ )
+
+ @dataclass
+ class PubSubMsg:
+ """
+ Describes the incoming pubsub message
+
+ Attributes:
+ message (TEncodable): Incoming message.
+ channel (TEncodable): Name of the channel that triggered the message.
+ pattern (Optional[TEncodable]): Pattern that triggered the message.
+ """
+
+ message: TEncodable
+ channel: TEncodable
+ pattern: Optional[TEncodable]
+
+ def get_pubsub_message(self) -> PubSubMsg:
+ """
+ Returns the next pubsub message.
+ Throws WrongConfiguration in cases:
+ 1. No pubsub subscriptions are configured for the client
+ 2. Callback is configured with the pubsub subscriptions
+
+ See https://valkey.io/docs/topics/pubsub/ for more details.
+
+ Returns:
+ PubSubMsg: The next pubsub message
+
+ Examples:
+ >>> pubsub_msg = listening_client.get_pubsub_message()
+ """
+ ...
+
+ def try_get_pubsub_message(self) -> Optional[PubSubMsg]:
+ """
+ Tries to return the next pubsub message.
+ Throws WrongConfiguration in cases:
+ 1. No pubsub subscriptions are configured for the client
+ 2. Callback is configured with the pubsub subscriptions
+
+ See https://valkey.io/docs/topics/pubsub/ for more details.
+
+ Returns:
+ Optional[PubSubMsg]: The next pubsub message or None
+
+ Examples:
+ >>> pubsub_msg = listening_client.try_get_pubsub_message()
+ """
+ ...
+
+ def lcs(
+ self,
+ key1: TEncodable,
+ key2: TEncodable,
+ ) -> bytes:
+ """
+ Returns the longest common subsequence between strings stored at key1 and key2.
+
+ Note that this is different than the longest common string algorithm, since
+ matching characters in the two strings do not need to be contiguous.
+
+ For instance the LCS between "foo" and "fao" is "fo", since scanning the two strings
+ from left to right, the longest common set of characters is composed of the first "f" and then the "o".
+
+ See https://valkey.io/commands/lcs for more details.
+
+ Args:
+ key1 (TEncodable): The key that stores the first string.
+ key2 (TEncodable): The key that stores the second string.
+
+ Returns:
+ A Bytes String containing the longest common subsequence between the 2 strings.
+ An empty String is returned if the keys do not exist or have no common subsequences.
+
+ Examples:
+ >>> client.mset({"testKey1" : "abcd", "testKey2": "axcd"})
+ b'OK'
+ >>> client.lcs("testKey1", "testKey2")
+ b'acd'
+
+ Since: Valkey version 7.0.0.
+ """
+ args: List[TEncodable] = [key1, key2]
+
+ return cast(
+ bytes,
+ self._execute_command(RequestType.LCS, args),
+ )
+
+ def lcs_len(
+ self,
+ key1: TEncodable,
+ key2: TEncodable,
+ ) -> int:
+ """
+ Returns the length of the longest common subsequence between strings stored at key1 and key2.
+
+ Note that this is different than the longest common string algorithm, since
+ matching characters in the two strings do not need to be contiguous.
+
+ For instance the LCS between "foo" and "fao" is "fo", since scanning the two strings
+ from left to right, the longest common set of characters is composed of the first "f" and then the "o".
+
+ See https://valkey.io/commands/lcs for more details.
+
+ Args:
+ key1 (TEncodable): The key that stores the first string value.
+ key2 (TEncodable): The key that stores the second string value.
+
+ Returns:
+ The length of the longest common subsequence between the 2 strings.
+
+ Examples:
+ >>> client.mset({"testKey1" : "abcd", "testKey2": "axcd"})
+ 'OK'
+ >>> client.lcs_len("testKey1", "testKey2")
+ 3 # the length of the longest common subsequence between these 2 strings (b"acd") is 3.
+
+ Since: Valkey version 7.0.0.
+ """
+ args: List[TEncodable] = [key1, key2, "LEN"]
+
+ return cast(
+ int,
+ self._execute_command(RequestType.LCS, args),
+ )
+
+ def lcs_idx(
+ self,
+ key1: TEncodable,
+ key2: TEncodable,
+ min_match_len: Optional[int] = None,
+ with_match_len: Optional[bool] = False,
+ ) -> Mapping[bytes, Union[List[List[Union[List[int], int]]], int]]:
+ """
+ Returns the indices and length of the longest common subsequence between strings stored at key1 and key2.
+
+ Note that this is different than the longest common string algorithm, since
+ matching characters in the two strings do not need to be contiguous.
+
+ For instance the LCS between "foo" and "fao" is "fo", since scanning the two strings
+ from left to right, the longest common set of characters is composed of the first "f" and then the "o".
+
+ See https://valkey.io/commands/lcs for more details.
+
+ Args:
+ key1 (TEncodable): The key that stores the first string value.
+ key2 (TEncodable): The key that stores the second string value.
+ min_match_len (Optional[int]): The minimum length of matches to include in the result.
+ with_match_len (Optional[bool]): If True, include the length of the substring matched for each substring.
+
+ Returns:
+ A Mapping containing the indices of the longest common subsequence between the
+ 2 strings and the length of the longest common subsequence. The resulting map contains two
+ keys, "matches" and "len":
+ - "len" is mapped to the length of the longest common subsequence between the 2 strings.
+ - "matches" is mapped to a three dimensional int array that stores pairs of indices that
+ represent the location of the common subsequences in the strings held by key1 and key2,
+ with the length of the match after each matches, if with_match_len is enabled.
+
+ Examples:
+ >>> client.mset({"testKey1" : "abcd1234", "testKey2": "bcdef1234"})
+ 'OK'
+ >>> client.lcs_idx("testKey1", "testKey2")
+ {
+ b'matches': [
+ [
+ [4, 7], # starting and ending indices of the subsequence b"1234" in b"abcd1234" (testKey1)
+ [5, 8], # starting and ending indices of the subsequence b"1234" in b"bcdef1234" (testKey2)
+ ],
+ [
+ [1, 3], # starting and ending indices of the subsequence b"bcd" in b"abcd1234" (testKey1)
+ [0, 2], # starting and ending indices of the subsequence b"bcd" in b"bcdef1234" (testKey2)
+ ],
+ ],
+ b'len': 7 # length of the entire longest common subsequence
+ }
+ >>> client.lcs_idx("testKey1", "testKey2", min_match_len=4)
+ {
+ b'matches': [
+ [
+ [4, 7],
+ [5, 8],
+ ],
+ # the other match with a length of 3 is excluded
+ ],
+ b'len': 7
+ }
+ >>> client.lcs_idx("testKey1", "testKey2", with_match_len=True)
+ {
+ b'matches': [
+ [
+ [4, 7],
+ [5, 8],
+ 4, # length of this match (b"1234")
+ ],
+ [
+ [1, 3],
+ [0, 2],
+ 3, # length of this match (b"bcd")
+ ],
+ ],
+ b'len': 7
+ }
+
+ Since: Valkey version 7.0.0.
+ """
+ args: List[TEncodable] = [key1, key2, "IDX"]
+
+ if min_match_len is not None:
+ args.extend(["MINMATCHLEN", str(min_match_len)])
+
+ if with_match_len:
+ args.append("WITHMATCHLEN")
+
+ return cast(
+ Mapping[bytes, Union[List[List[Union[List[int], int]]], int]],
+ self._execute_command(RequestType.LCS, args),
+ )
+
+ def lpos(
+ self,
+ key: TEncodable,
+ element: TEncodable,
+ rank: Optional[int] = None,
+ count: Optional[int] = None,
+ max_len: Optional[int] = None,
+ ) -> Union[int, List[int], None]:
+ """
+ Returns the index or indexes of element(s) matching `element` in the `key` list. If no match is found,
+ None is returned.
+
+ See https://valkey.io/commands/lpos for more details.
+
+ Args:
+ key (TEncodable): The name of the list.
+ element (TEncodable): The value to search for within the list.
+ rank (Optional[int]): The rank of the match to return.
+ count (Optional[int]): The number of matches wanted. A `count` of 0 returns all the matches.
+ max_len (Optional[int]): The maximum number of comparisons to make between the element and the items
+ in the list. A `max_len` of 0 means unlimited amount of comparisons.
+
+ Returns:
+ Union[int, List[int], None]: The index of the first occurrence of `element`,
+ or None if `element` is not in the list.
+ With the `count` option, a list of indices of matching elements will be returned.
+
+ Examples:
+ >>> client.rpush(key, ['a', 'b', 'c', '1', '2', '3', 'c', 'c'])
+ >>> client.lpos(key, 'c')
+ 2
+ >>> client.lpos(key, 'c', rank = 2)
+ 6
+ >>> client.lpos(key, 'c', rank = -1)
+ 7
+ >>> client.lpos(key, 'c', count = 2)
+ [2, 6]
+ >>> client.lpos(key, 'c', count = 0)
+ [2, 6, 7]
+
+ Since: Valkey version 6.0.6.
+ """
+ args: List[TEncodable] = [key, element]
+
+ if rank is not None:
+ args.extend(["RANK", str(rank)])
+
+ if count is not None:
+ args.extend(["COUNT", str(count)])
+
+ if max_len is not None:
+ args.extend(["MAXLEN", str(max_len)])
+
+ return cast(
+ Union[int, List[int], None],
+ self._execute_command(RequestType.LPos, args),
+ )
+
+ def pubsub_channels(
+ self, pattern: Optional[TEncodable] = None
+ ) -> List[bytes]:
+ """
+ Lists the currently active channels.
+ The command is routed to all nodes, and aggregates the response to a single array.
+
+ See https://valkey.io/commands/pubsub-channels for more details.
+
+ Args:
+ pattern (Optional[TEncodable]): A glob-style pattern to match active channels.
+ If not provided, all active channels are returned.
+
+ Returns:
+ List[bytes]: A list of currently active channels matching the given pattern.
+ If no pattern is specified, all active channels are returned.
+
+ Examples:
+ >>> client.pubsub_channels()
+ [b"channel1", b"channel2"]
+
+ >>> client.pubsub_channels("news.*")
+ [b"news.sports", "news.weather"]
+ """
+
+ return cast(
+ List[bytes],
+ self._execute_command(
+ RequestType.PubSubChannels, [pattern] if pattern else []
+ ),
+ )
+
+ def pubsub_numpat(self) -> int:
+ """
+ Returns the number of unique patterns that are subscribed to by clients.
+
+ Note: This is the total number of unique patterns all the clients are subscribed to,
+ not the count of clients subscribed to patterns.
+ The command is routed to all nodes, and aggregates the response to the sum of all pattern subscriptions.
+
+ See https://valkey.io/commands/pubsub-numpat for more details.
+
+ Returns:
+ int: The number of unique patterns.
+
+ Examples:
+ >>> client.pubsub_numpat()
+ 3
+ """
+ return cast(int, self._execute_command(RequestType.PubSubNumPat, []))
+
+ def pubsub_numsub(
+ self, channels: Optional[List[TEncodable]] = None
+ ) -> Mapping[bytes, int]:
+ """
+ Returns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels.
+
+ Note that it is valid to call this command without channels. In this case, it will just return an empty map.
+ The command is routed to all nodes, and aggregates the response to a single map of the channels and their number of subscriptions.
+
+ See https://valkey.io/commands/pubsub-numsub for more details.
+
+ Args:
+ channels (Optional[List[TEncodable]]): The list of channels to query for the number of subscribers.
+ If not provided, returns an empty map.
+
+ Returns:
+ Mapping[bytes, int]: A map where keys are the channel names and values are the number of subscribers.
+
+ Examples:
+ >>> client.pubsub_numsub(["channel1", "channel2"])
+ {b'channel1': 3, b'channel2': 5}
+
+ >>> client.pubsub_numsub()
+ {}
+ """
+ return cast(
+ Mapping[bytes, int],
+ self._execute_command(
+ RequestType.PubSubNumSub, channels if channels else []
+ ),
+ )
+
+ def sort(
+ self,
+ key: TEncodable,
+ by_pattern: Optional[TEncodable] = None,
+ limit: Optional[Limit] = None,
+ get_patterns: Optional[List[TEncodable]] = None,
+ order: Optional[OrderBy] = None,
+ alpha: Optional[bool] = None,
+ ) -> List[Optional[bytes]]:
+ """
+ Sorts the elements in the list, set, or sorted set at `key` and returns the result.
+ The `sort` command can be used to sort elements based on different criteria and apply transformations on sorted elements.
+ This command is routed to primary nodes only.
+ To store the result into a new key, see `sort_store`.
+
+ Note: When in cluster mode, `key`, and any patterns specified in `by_pattern` or `get_patterns`
+ must map to the same hash slot. The use of `by_pattern` and `get_patterns` in cluster mode is supported
+ only since Valkey version 8.0.
+
+ See https://valkey.io/commands/sort for more details.
+
+ Args:
+ key (TEncodable): The key of the list, set, or sorted set to be sorted.
+ by_pattern (Optional[TEncodable]): A pattern to sort by external keys instead of by the elements stored at the key themselves.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from the key replaces the asterisk to create the key name. For example, if `key` contains IDs of objects,
+ `by_pattern` can be used to sort these IDs based on an attribute of the objects, like their weights or
+ timestamps.
+ E.g., if `by_pattern` is `weight_*`, the command will sort the elements by the values of the
+ keys `weight_`.
+ If not provided, elements are sorted by their value.
+ Supported in cluster mode since Valkey version 8.0.
+ limit (Optional[Limit]): Limiting the range of the query by setting offset and result count. See `Limit` class for more information.
+ get_patterns (Optional[List[TEncodable]]): A pattern used to retrieve external keys' values, instead of the elements at `key`.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from `key` replaces the asterisk to create the key name. This allows the sorted elements to be
+ transformed based on the related keys values. For example, if `key` contains IDs of users, `get_pattern`
+ can be used to retrieve specific attributes of these users, such as their names or email addresses.
+ E.g., if `get_pattern` is `name_*`, the command will return the values of the keys `name_`
+ for each sorted element. Multiple `get_pattern` arguments can be provided to retrieve multiple attributes.
+ The special value `#` can be used to include the actual element from `key` being sorted.
+ If not provided, only the sorted elements themselves are returned.
+ Supported in cluster mode since Valkey version 8.0.
+ order (Optional[OrderBy]): Specifies the order to sort the elements.
+ Can be `OrderBy.ASC` (ascending) or `OrderBy.DESC` (descending).
+ alpha (Optional[bool]): When `True`, sorts elements lexicographically. When `False` (default), sorts elements numerically.
+ Use this when the list, set, or sorted set contains string values that cannot be converted into double precision floating point
+
+ Returns:
+ List[Optional[bytes]]: Returns a list of sorted elements.
+
+ Examples:
+ >>> client.lpush("mylist", [b"3", b"1", b"2"])
+ >>> client.sort("mylist")
+ [b'1', b'2', b'3']
+ >>> client.sort("mylist", order=OrderBy.DESC)
+ [b'3', b'2', b'1']
+ >>> client.lpush("mylist2", ['2', '1', '2', '3', '3', '1'])
+ >>> client.sort("mylist2", limit=Limit(2, 3))
+ [b'2', b'2', b'3']
+ >>> client.hset("user:1": {"name": "Alice", "age": '30'})
+ >>> client.hset("user:2", {"name": "Bob", "age": '25'})
+ >>> client.lpush("user_ids", ['2', '1'])
+ >>> client.sort("user_ids", by_pattern="user:*->age", get_patterns=["user:*->name"])
+ [b'Bob', b'Alice']
+ """
+ args = _build_sort_args(key, by_pattern, limit, get_patterns, order, alpha)
+ result = self._execute_command(RequestType.Sort, args)
+ return cast(List[Optional[bytes]], result)
+
+ def sort_ro(
+ self,
+ key: TEncodable,
+ by_pattern: Optional[TEncodable] = None,
+ limit: Optional[Limit] = None,
+ get_patterns: Optional[List[TEncodable]] = None,
+ order: Optional[OrderBy] = None,
+ alpha: Optional[bool] = None,
+ ) -> List[Optional[bytes]]:
+ """
+ Sorts the elements in the list, set, or sorted set at `key` and returns the result.
+ The `sort_ro` command can be used to sort elements based on different criteria and apply transformations on sorted elements.
+ This command is routed depending on the client's `ReadFrom` strategy.
+
+ See https://valkey.io/commands/sort for more details.
+
+ Note: When in cluster mode, `key`, and any patterns specified in `by_pattern` or `get_patterns`
+ must map to the same hash slot. The use of `by_pattern` and `get_patterns` in cluster mode is supported
+ only since Valkey version 8.0.
+
+ Args:
+ key (TEncodable): The key of the list, set, or sorted set to be sorted.
+ by_pattern (Optional[TEncodable]): A pattern to sort by external keys instead of by the elements stored at the key themselves.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from the key replaces the asterisk to create the key name. For example, if `key` contains IDs of objects,
+ `by_pattern` can be used to sort these IDs based on an attribute of the objects, like their weights or
+ timestamps.
+ E.g., if `by_pattern` is `weight_*`, the command will sort the elements by the values of the
+ keys `weight_`.
+ If not provided, elements are sorted by their value.
+ Supported in cluster mode since Valkey version 8.0.
+ limit (Optional[Limit]): Limiting the range of the query by setting offset and result count. See `Limit` class for more information.
+ get_patterns (Optional[List[TEncodable]]): A pattern used to retrieve external keys' values, instead of the elements at `key`.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from `key` replaces the asterisk to create the key name. This allows the sorted elements to be
+ transformed based on the related keys values. For example, if `key` contains IDs of users, `get_pattern`
+ can be used to retrieve specific attributes of these users, such as their names or email addresses.
+ E.g., if `get_pattern` is `name_*`, the command will return the values of the keys `name_`
+ for each sorted element. Multiple `get_pattern` arguments can be provided to retrieve multiple attributes.
+ The special value `#` can be used to include the actual element from `key` being sorted.
+ If not provided, only the sorted elements themselves are returned.
+ Supported in cluster mode since Valkey version 8.0.
+ order (Optional[OrderBy]): Specifies the order to sort the elements.
+ Can be `OrderBy.ASC` (ascending) or `OrderBy.DESC` (descending).
+ alpha (Optional[bool]): When `True`, sorts elements lexicographically. When `False` (default), sorts elements numerically.
+ Use this when the list, set, or sorted set contains string values that cannot be converted into double precision floating point
+
+ Returns:
+ List[Optional[bytes]]: Returns a list of sorted elements.
+
+ Examples:
+ >>> client.lpush("mylist", 3, 1, 2)
+ >>> client.sort_ro("mylist")
+ [b'1', b'2', b'3']
+ >>> client.sort_ro("mylist", order=OrderBy.DESC)
+ [b'3', b'2', b'1']
+ >>> client.lpush("mylist2", 2, 1, 2, 3, 3, 1)
+ >>> client.sort_ro("mylist2", limit=Limit(2, 3))
+ [b'2', b'2', b'3']
+ >>> client.hset("user:1", "name", "Alice", "age", 30)
+ >>> client.hset("user:2", "name", "Bob", "age", 25)
+ >>> client.lpush("user_ids", 2, 1)
+ >>> client.sort_ro("user_ids", by_pattern="user:*->age", get_patterns=["user:*->name"])
+ [b'Bob', b'Alice']
+
+ Since: Valkey version 7.0.0.
+ """
+ args = _build_sort_args(key, by_pattern, limit, get_patterns, order, alpha)
+ result = self._execute_command(RequestType.SortReadOnly, args)
+ return cast(List[Optional[bytes]], result)
+
+ def sort_store(
+ self,
+ key: TEncodable,
+ destination: TEncodable,
+ by_pattern: Optional[TEncodable] = None,
+ limit: Optional[Limit] = None,
+ get_patterns: Optional[List[TEncodable]] = None,
+ order: Optional[OrderBy] = None,
+ alpha: Optional[bool] = None,
+ ) -> int:
+ """
+ Sorts the elements in the list, set, or sorted set at `key` and stores the result in `destination`.
+ The `sort` command can be used to sort elements based on different criteria, apply transformations on sorted elements, and store the result in a new key.
+ To get the sort result without storing it into a key, see `sort`.
+
+ See https://valkey.io/commands/sort for more details.
+
+ Note: When in cluster mode, `key`, `destination`, and any patterns specified in `by_pattern` or `get_patterns`
+ must map to the same hash slot. The use of `by_pattern` and `get_patterns` in cluster mode is supported
+ only since Valkey version 8.0.
+
+ Args:
+ key (TEncodable): The key of the list, set, or sorted set to be sorted.
+ destination (TEncodable): The key where the sorted result will be stored.
+ by_pattern (Optional[TEncodable]): A pattern to sort by external keys instead of by the elements stored at the key themselves.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from the key replaces the asterisk to create the key name. For example, if `key` contains IDs of objects,
+ `by_pattern` can be used to sort these IDs based on an attribute of the objects, like their weights or
+ timestamps.
+ E.g., if `by_pattern` is `weight_*`, the command will sort the elements by the values of the
+ keys `weight_`.
+ If not provided, elements are sorted by their value.
+ Supported in cluster mode since Valkey version 8.0.
+ limit (Optional[Limit]): Limiting the range of the query by setting offset and result count. See `Limit` class for more information.
+ get_patterns (Optional[List[TEncodable]]): A pattern used to retrieve external keys' values, instead of the elements at `key`.
+ The pattern should contain an asterisk (*) as a placeholder for the element values, where the value
+ from `key` replaces the asterisk to create the key name. This allows the sorted elements to be
+ transformed based on the related keys values. For example, if `key` contains IDs of users, `get_pattern`
+ can be used to retrieve specific attributes of these users, such as their names or email addresses.
+ E.g., if `get_pattern` is `name_*`, the command will return the values of the keys `name_`
+ for each sorted element. Multiple `get_pattern` arguments can be provided to retrieve multiple attributes.
+ The special value `#` can be used to include the actual element from `key` being sorted.
+ If not provided, only the sorted elements themselves are returned.
+ Supported in cluster mode since Valkey version 8.0.
+ order (Optional[OrderBy]): Specifies the order to sort the elements.
+ Can be `OrderBy.ASC` (ascending) or `OrderBy.DESC` (descending).
+ alpha (Optional[bool]): When `True`, sorts elements lexicographically. When `False` (default), sorts elements numerically.
+ Use this when the list, set, or sorted set contains string values that cannot be converted into double precision floating point
+
+ Returns:
+ int: The number of elements in the sorted key stored at `destination`.
+
+ Examples:
+ >>> client.lpush("mylist", ['3', '1', '2'])
+ >>> client.sort_store("mylist", "{mylist}sorted_list")
+ 3 # Indicates that the sorted list "{mylist}sorted_list" contains three elements.
+ >>> client.lrange("{mylist}sorted_list", 0, -1)
+ [b'1', b'2', b'3']
+ """
+ args = _build_sort_args(
+ key, by_pattern, limit, get_patterns, order, alpha, store=destination
+ )
+ result = self._execute_command(RequestType.Sort, args)
+ return cast(int, result)
diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py
index 0ab5c9d6e9..8046e96c99 100644
--- a/python/python/tests/conftest.py
+++ b/python/python/tests/conftest.py
@@ -1,7 +1,7 @@
# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
import random
-from typing import AsyncGenerator, List, Optional, Union
+from typing import AsyncGenerator, Generator, List, Optional, Union
import pytest
from glide.config import (
@@ -14,6 +14,7 @@
)
from glide.exceptions import ClosingError
from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient
+from glide.glide_sync_client import GlideSync
from glide.logger import Level as logLevel
from glide.logger import Logger
from glide.routes import AllNodes
@@ -116,6 +117,7 @@ def create_clusters(tls, load_module, cluster_endpoints, standalone_endpoints):
"""
Create Valkey clusters based on the provided options.
"""
+ # return
if cluster_endpoints or standalone_endpoints:
# Endpoints were passed by the caller, not creating clusters internally
if cluster_endpoints:
@@ -220,6 +222,18 @@ async def glide_client(
await test_teardown(request, cluster_mode, protocol)
await client.close()
+@pytest.fixture(scope="function")
+def glide_sync_client(
+ request,
+ cluster_mode: bool,
+ protocol: ProtocolVersion,
+) -> Generator[GlideSync, None, None]:
+ "Get async socket client for tests"
+ client = create_sync_client(request, cluster_mode, protocol=protocol)
+ yield client
+ sync_test_teardown(request, cluster_mode, protocol)
+ client.close()
+
@pytest.fixture(scope="function")
async def management_client(
@@ -233,8 +247,7 @@ async def management_client(
await test_teardown(request, cluster_mode, protocol)
await client.close()
-
-async def create_client(
+def create_client_config(
request,
cluster_mode: bool,
credentials: Optional[ServerCredentials] = None,
@@ -253,8 +266,7 @@ async def create_client(
read_from: ReadFrom = ReadFrom.PRIMARY,
client_az: Optional[str] = None,
valkey_cluster: Optional[ValkeyCluster] = None,
-) -> Union[GlideClient, GlideClusterClient]:
- # Create async socket client
+) -> Union[GlideClusterClientConfiguration, GlideClientConfiguration]:
use_tls = request.config.getoption("--tls")
if cluster_mode:
valkey_cluster = valkey_cluster or pytest.valkey_cluster
@@ -262,7 +274,7 @@ async def create_client(
assert database_id == 0
k = min(3, len(valkey_cluster.nodes_addr))
seed_nodes = random.sample(valkey_cluster.nodes_addr, k=k)
- cluster_config = GlideClusterClientConfiguration(
+ config = GlideClusterClientConfiguration(
addresses=seed_nodes if addresses is None else addresses,
use_tls=use_tls,
credentials=credentials,
@@ -274,7 +286,6 @@ async def create_client(
read_from=read_from,
client_az=client_az,
)
- return await GlideClusterClient.create(cluster_config)
else:
assert type(pytest.standalone_cluster) is ValkeyCluster
config = GlideClientConfiguration(
@@ -292,9 +303,89 @@ async def create_client(
read_from=read_from,
client_az=client_az,
)
+ return config
+
+
+async def create_client(
+    request,
+    cluster_mode: bool,
+    credentials: Optional[ServerCredentials] = None,
+    database_id: int = 0,
+    addresses: Optional[List[NodeAddress]] = None,
+    client_name: Optional[str] = None,
+    protocol: ProtocolVersion = ProtocolVersion.RESP3,
+    timeout: Optional[int] = 1000,
+    cluster_mode_pubsub: Optional[
+        GlideClusterClientConfiguration.PubSubSubscriptions
+    ] = None,
+    standalone_mode_pubsub: Optional[
+        GlideClientConfiguration.PubSubSubscriptions
+    ] = None,
+    inflight_requests_limit: Optional[int] = None,
+    read_from: ReadFrom = ReadFrom.PRIMARY,
+    client_az: Optional[str] = None,
+    valkey_cluster: Optional[ValkeyCluster] = None,
+) -> Union[GlideClient, GlideClusterClient]:
+    # Create async socket client.
+    # Builds the shared configuration via create_client_config(), then
+    # instantiates the cluster-mode or standalone async client from it.
+    config = create_client_config(
+        request,
+        cluster_mode,
+        credentials,
+        database_id,
+        addresses,
+        client_name,
+        protocol,
+        timeout,
+        cluster_mode_pubsub,
+        standalone_mode_pubsub,
+        inflight_requests_limit,
+        read_from,
+        client_az,
+        valkey_cluster)
+    if cluster_mode:
+        return await GlideClusterClient.create(config)
+    else:
        return await GlideClient.create(config)
+def create_sync_client(
+    request,
+    cluster_mode: bool,
+    credentials: Optional[ServerCredentials] = None,
+    database_id: int = 0,
+    addresses: Optional[List[NodeAddress]] = None,
+    client_name: Optional[str] = None,
+    protocol: ProtocolVersion = ProtocolVersion.RESP3,
+    timeout: Optional[int] = 1000,
+    cluster_mode_pubsub: Optional[
+        GlideClusterClientConfiguration.PubSubSubscriptions
+    ] = None,
+    standalone_mode_pubsub: Optional[
+        GlideClientConfiguration.PubSubSubscriptions
+    ] = None,
+    inflight_requests_limit: Optional[int] = None,
+    read_from: ReadFrom = ReadFrom.PRIMARY,
+    client_az: Optional[str] = None,
+    valkey_cluster: Optional[ValkeyCluster] = None,
+) -> GlideSync:
+    # Create sync client.
+    # Mirrors create_client(): same configuration plumbing, but constructs the
+    # blocking GlideSync client directly (no await needed).
+    config = create_client_config(
+        request,
+        cluster_mode,
+        credentials,
+        database_id,
+        addresses,
+        client_name,
+        protocol,
+        timeout,
+        cluster_mode_pubsub,
+        standalone_mode_pubsub,
+        inflight_requests_limit,
+        read_from,
+        client_az,
+        valkey_cluster)
+    return GlideSync(config)
+
NEW_PASSWORD = "new_secure_password"
WRONG_PASSWORD = "wrong_password"
@@ -309,6 +400,26 @@ async def auth_client(client: TGlideClient, password):
await client.custom_command(["AUTH", password], route=AllNodes())
+def sync_auth_client(client: TGlideClient, password):
+    """
+    Authenticates the given TGlideClient server connected.
+
+    NOTE(review): the isinstance checks target the *async* GlideClient /
+    GlideClusterClient classes; if the sync GlideSync client is not a subclass
+    of these, both branches are silently skipped — confirm the class hierarchy.
+    """
+    if isinstance(client, GlideClient):
+        client.custom_command(["AUTH", password])
+    elif isinstance(client, GlideClusterClient):
+        client.custom_command(["AUTH", password], route=AllNodes())
+
+
+def sync_config_set_new_password(client: TGlideClient, password):
+    """
+    Sets a new password for the given TGlideClient server connected.
+    This function updates the server to require a new password.
+
+    NOTE(review): same caveat as sync_auth_client — the isinstance checks are
+    against the async client classes; verify GlideSync matches one of them.
+    """
+    if isinstance(client, GlideClient):
+        client.config_set({"requirepass": password})
+    elif isinstance(client, GlideClusterClient):
+        client.config_set({"requirepass": password}, route=AllNodes())
+
async def config_set_new_password(client: TGlideClient, password):
"""
Sets a new password for the given TGlideClient server connected.
@@ -320,6 +431,17 @@ async def config_set_new_password(client: TGlideClient, password):
await client.config_set({"requirepass": password}, route=AllNodes())
+def sync_kill_connections(client: TGlideClient):
+    """
+    Kills all connections to the given TGlideClient server connected.
+
+    NOTE(review): same caveat as sync_auth_client — the isinstance checks are
+    against the async client classes; verify GlideSync matches one of them.
+    """
+    if isinstance(client, GlideClient):
+        client.custom_command(["CLIENT", "KILL", "TYPE", "normal"])
+    elif isinstance(client, GlideClusterClient):
+        client.custom_command(
+            ["CLIENT", "KILL", "TYPE", "normal"], route=AllNodes()
+        )
+
async def kill_connections(client: TGlideClient):
"""
Kills all connections to the given TGlideClient server connected.
@@ -372,6 +494,46 @@ async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion):
raise e
+def sync_test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion):
+    """
+    Perform teardown tasks such as flushing all data from the cluster.
+
+    If authentication is required, attempt to connect with the known password
+    (NEW_PASSWORD, the value auth tests set), reset it back to empty, and
+    proceed with teardown. Sync counterpart of test_teardown().
+    """
+    credentials = None
+    try:
+        # Try connecting without credentials
+        client = create_sync_client(
+            request, cluster_mode, protocol=protocol, timeout=2000
+        )
+        client.custom_command(["FLUSHALL"])
+        client.close()
+    except ClosingError as e:
+        # Check if the error is due to authentication
+        if "NOAUTH" in str(e):
+            # Use the known password to authenticate
+            credentials = ServerCredentials(password=NEW_PASSWORD)
+            client = create_sync_client(
+                request,
+                cluster_mode,
+                protocol=protocol,
+                timeout=2000,
+                credentials=credentials,
+            )
+            try:
+                sync_auth_client(client, NEW_PASSWORD)
+                # Reset the server password back to empty
+                sync_config_set_new_password(client, "")
+                client.update_connection_password(None)
+                # Perform the teardown
+                client.custom_command(["FLUSHALL"])
+            finally:
+                client.close()
+        else:
+            raise e
+
+
@pytest.fixture(autouse=True)
async def skip_if_version_below(request):
"""
diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py
index 23064b2f3d..67cb6a4b50 100644
--- a/python/python/tests/test_async_client.py
+++ b/python/python/tests/test_async_client.py
@@ -11,6 +11,7 @@
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union, cast
import pytest
+from glide.glide_client_sync_uds import UDSGlideClientSync
from glide import ClosingError, RequestError, Script
from glide.async_commands.bitmap import (
BitFieldGet,
@@ -41,6 +42,7 @@
InsertPosition,
UpdateOptions,
)
+from glide.glide_async_ffi_client import GlideAsync
from glide.async_commands.sorted_set import (
AggregationType,
GeoSearchByBox,
@@ -76,6 +78,7 @@
GlideClusterClientConfiguration,
ProtocolVersion,
ServerCredentials,
+ NodeAddress
)
from glide.constants import OK, TEncodable, TFunctionStatsSingleNodeResponse, TResult
from glide.exceptions import TimeoutError as GlideTimeoutError
@@ -110,6 +113,10 @@
@pytest.mark.asyncio
+def test_sync_uds_client():
+    # NOTE(review): this function sits between the @pytest.mark.asyncio
+    # decorator above and `class TestGlideClients`, so the decorator is applied
+    # to this sync function instead of the class — move it above the decorator
+    # (or delete it; it reads like leftover debug code).
+    # NOTE(review): hardcodes localhost:6379, never closes the client, and
+    # asserts nothing about the SET result.
+    config = GlideClientConfiguration([NodeAddress("localhost", 6379)])
+    client = UDSGlideClientSync.create(config)
+    client.set("foo", "bar")
class TestGlideClients:
@pytest.mark.parametrize("cluster_mode", [True, False])
@pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
@@ -152,11 +159,12 @@ async def test_send_and_receive_non_ascii_unicode(self, glide_client: TGlideClie
assert await glide_client.get(key.encode()) == value.encode()
@pytest.mark.parametrize("value_size", [100, 2**16])
- @pytest.mark.parametrize("cluster_mode", [True, False])
- @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ # @pytest.mark.parametrize("cluster_mode", [True, False])
+ # @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
async def test_client_handle_concurrent_workload_without_dropping_or_changing_values(
- self, glide_client: TGlideClient, value_size
+ self, value_size
):
+ glide_client = GlideAsync()
num_of_concurrent_tasks = 100
running_tasks = set()
@@ -306,9 +314,10 @@ async def test_statistics(self, glide_client: TGlideClient):
@pytest.mark.asyncio
class TestCommands:
@pytest.mark.smoke_test
- @pytest.mark.parametrize("cluster_mode", [True, False])
- @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
- async def test_socket_set_get(self, glide_client: TGlideClient):
+ # @pytest.mark.parametrize("cluster_mode", [True, False])
+ # @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ async def test_socket_set_get(self):
+ glide_client = GlideAsync()
key = get_random_string(10)
value = datetime.now(timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
assert await glide_client.set(key, value) == OK
diff --git a/python/python/tests/test_sync_client.py b/python/python/tests/test_sync_client.py
new file mode 100644
index 0000000000..f7ba939f0c
--- /dev/null
+++ b/python/python/tests/test_sync_client.py
@@ -0,0 +1,10573 @@
+# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+# mypy: disable_error_code="arg-type"
+
+from __future__ import annotations
+
+import asyncio
+import copy
+import math
+import time
+from datetime import date, datetime, timedelta, timezone
+from typing import Any, Dict, List, Mapping, Optional, Tuple, Union, cast
+
+import pytest
+from glide import ClosingError, RequestError, Script
+from glide.async_commands.bitmap import (
+ BitFieldGet,
+ BitFieldIncrBy,
+ BitFieldOverflow,
+ BitFieldSet,
+ BitmapIndexType,
+ BitOffset,
+ BitOffsetMultiplier,
+ BitOverflowControl,
+ BitwiseOperation,
+ OffsetOptions,
+ SignedEncoding,
+ UnsignedEncoding,
+)
+from glide.async_commands.command_args import Limit, ListDirection, OrderBy
+from glide.async_commands.core import (
+ ConditionalChange,
+ ExpireOptions,
+ ExpiryGetEx,
+ ExpirySet,
+ ExpiryType,
+ ExpiryTypeGetEx,
+ FlushMode,
+ FunctionRestorePolicy,
+ InfBound,
+ InfoSection,
+ InsertPosition,
+ UpdateOptions,
+)
+from glide.async_commands.sorted_set import (
+ AggregationType,
+ GeoSearchByBox,
+ GeoSearchByRadius,
+ GeoSearchCount,
+ GeospatialData,
+ GeoUnit,
+ InfBound,
+ LexBoundary,
+ RangeByIndex,
+ RangeByLex,
+ RangeByScore,
+ ScoreBoundary,
+ ScoreFilter,
+)
+from glide.async_commands.stream import (
+ ExclusiveIdBound,
+ IdBound,
+ MaxId,
+ MinId,
+ StreamAddOptions,
+ StreamClaimOptions,
+ StreamGroupOptions,
+ StreamPendingOptions,
+ StreamReadGroupOptions,
+ StreamReadOptions,
+ TrimByMaxLen,
+ TrimByMinId,
+)
+from glide.async_commands.transaction import ClusterTransaction, Transaction
+from glide.config import (
+ GlideClientConfiguration,
+ GlideClusterClientConfiguration,
+ ProtocolVersion,
+ ServerCredentials,
+)
+from glide.constants import OK, TEncodable, TFunctionStatsSingleNodeResponse, TResult
+from glide.exceptions import TimeoutError as GlideTimeoutError
+from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient
+from glide.routes import (
+ AllNodes,
+ AllPrimaries,
+ ByAddressRoute,
+ RandomNode,
+ Route,
+ SlotIdRoute,
+ SlotKeyRoute,
+ SlotType,
+)
+from glide.glide_sync_client import GlideSync
+from tests.conftest import create_client, create_sync_client
+from tests.utils.utils import (
+ check_function_list_response,
+ check_function_stats_response,
+ check_if_server_version_lt,
+ compare_maps,
+ convert_bytes_to_string_object,
+ convert_string_to_bytes_object,
+ create_long_running_lua_script,
+ create_lua_lib_with_long_running_function,
+ generate_lua_lib_code,
+ get_first_result,
+ get_random_string,
+ is_single_response,
+ parse_info_response,
+ round_values,
+)
+
+
+@pytest.mark.asyncio
+class TestGlideClients:
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_register_client_name_and_version(self, glide_sync_client: GlideSync):
+ min_version = "7.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ # TODO: change it to pytest fixture after we'll implement a sync client
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ info = glide_sync_client.custom_command(["CLIENT", "INFO"])
+ assert isinstance(info, bytes)
+ info_str = info.decode()
+ assert "lib-name=GlidePy" in info_str
+ assert "lib-ver=unknown" in info_str
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_send_and_receive_large_values(self, request, cluster_mode, protocol):
+ glide_sync_client = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000
+ )
+ length = 2**25 # 33mb
+ key = "0" * length
+ value = "0" * length
+ assert len(key) == length
+ assert len(value) == length
+ glide_sync_client.set(key, value)
+ assert glide_sync_client.get(key) == value.encode()
+ glide_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_send_and_receive_non_ascii_unicode(self, glide_sync_client: GlideSync):
+        """SET/GET must round-trip non-ASCII text, both as str and as bytes."""
+        key = "foo"
+        value = "שלום hello 汉字"
+        # Sanity check that the literal survived source encoding.
+        assert value == "שלום hello 汉字"
+        glide_sync_client.set(key, value)
+        assert glide_sync_client.get(key) == value.encode()
+        # check set and get in bytes
+        glide_sync_client.set(key.encode(), value.encode())
+        assert glide_sync_client.get(key.encode()) == value.encode()
+
+ @pytest.mark.parametrize("value_size", [100, 2**16])
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_client_handle_concurrent_workload_without_dropping_or_changing_values(
+ self, glide_sync_client: GlideSync, value_size
+ ):
+ num_of_concurrent_tasks = 100
+ running_tasks = set()
+
+ def exec_command(i):
+ range_end = 1 if value_size > 100 else 100
+ for _ in range(range_end):
+ value = get_random_string(value_size)
+ assert glide_sync_client.set(str(i), value) == OK
+ assert glide_sync_client.get(str(i)) == value.encode()
+
+ for i in range(num_of_concurrent_tasks):
+ task = asyncio.create_task(exec_command(i))
+ running_tasks.add(task)
+ task.add_done_callback(running_tasks.discard)
+ asyncio.gather(*(list(running_tasks)))
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_can_connect_with_auth_requirepass(
+ self, glide_sync_client: GlideSync, request
+ ):
+ is_cluster = isinstance(glide_sync_client, GlideClusterClient)
+ password = "TEST_AUTH"
+ credentials = ServerCredentials(password)
+ try:
+ glide_sync_client.custom_command(
+ ["CONFIG", "SET", "requirepass", password]
+ )
+
+ with pytest.raises(ClosingError, match="NOAUTH"):
+ # Creation of a new client without password should fail
+ create_client(
+ request,
+ is_cluster,
+ addresses=glide_sync_client.config.addresses,
+ )
+
+ auth_client = create_client(
+ request,
+ is_cluster,
+ credentials,
+ addresses=glide_sync_client.config.addresses,
+ )
+ key = get_random_string(10)
+ assert auth_client.set(key, key) == OK
+ assert auth_client.get(key) == key.encode()
+ auth_client.close()
+
+ finally:
+ # Reset the password
+ auth_client = create_client(
+ request,
+ is_cluster,
+ credentials,
+ addresses=glide_sync_client.config.addresses,
+ )
+ auth_client.custom_command(["CONFIG", "SET", "requirepass", ""])
+ auth_client.close()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_can_connect_with_auth_acl(
+ self, glide_sync_client: Union[GlideClient, GlideClusterClient], request
+ ):
+ is_cluster = isinstance(glide_sync_client, GlideClusterClient)
+ username = "testuser"
+ password = "TEST_AUTH"
+ try:
+ assert (
+ glide_sync_client.custom_command(
+ [
+ "ACL",
+ "SETUSER",
+ username,
+ "on",
+ "allkeys",
+ "+get",
+ "+cluster",
+ "+ping",
+ "+info",
+ "+client",
+ f">{password}",
+ ]
+ )
+ == OK
+ )
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, key) == OK
+ credentials = ServerCredentials(password, username)
+
+ testuser_client = create_client(
+ request,
+ is_cluster,
+ credentials,
+ addresses=glide_sync_client.config.addresses,
+ )
+ assert testuser_client.get(key) == key.encode()
+ with pytest.raises(RequestError) as e:
+ # This client isn't authorized to perform SET
+ testuser_client.set("foo", "bar")
+ assert "NOPERM" in str(e)
+ testuser_client.close()
+ finally:
+ # Delete this user
+ glide_sync_client.custom_command(["ACL", "DELUSER", username])
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ def test_sync_select_standalone_database_id(self, request, cluster_mode):
+ glide_sync_client = create_client(
+ request, cluster_mode=cluster_mode, database_id=4
+ )
+ client_info = glide_sync_client.custom_command(["CLIENT", "INFO"])
+ assert b"db=4" in client_info
+ glide_sync_client.close()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_client_name(self, request, cluster_mode, protocol):
+ glide_sync_client = create_client(
+ request,
+ cluster_mode=cluster_mode,
+ client_name="TEST_CLIENT_NAME",
+ protocol=protocol,
+ )
+ client_info = glide_sync_client.custom_command(["CLIENT", "INFO"])
+ assert b"name=TEST_CLIENT_NAME" in client_info
+ glide_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_closed_client_raises_error(self, glide_sync_client: GlideSync):
+        """Any command issued after close() must raise ClosingError."""
+        glide_sync_client.close()
+        with pytest.raises(ClosingError) as e:
+            glide_sync_client.set("foo", "bar")
+        assert "the client is closed" in str(e)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_statistics(self, glide_sync_client: GlideSync):
+        """get_statistics() returns exactly the two documented counters."""
+        stats = glide_sync_client.get_statistics()
+        assert isinstance(stats, dict)
+        assert "total_connections" in stats
+        assert "total_clients" in stats
+        assert len(stats) == 2
+
+
+@pytest.mark.asyncio
+class TestCommands:
+    @pytest.mark.smoke_test
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_socket_set_get(self, glide_sync_client: GlideSync):
+        """Smoke test: a basic SET followed by GET round-trips the value."""
+        key = get_random_string(10)
+        value = datetime.now(timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
+        assert glide_sync_client.set(key, value) == OK
+        assert glide_sync_client.get(key) == value.encode()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP3])
+    def test_sync_use_resp3_protocol(self, glide_sync_client: GlideSync):
+        """When configured for RESP3, HELLO must report protocol version 3."""
+        result = cast(Dict[bytes, bytes], glide_sync_client.custom_command(["HELLO"]))
+
+        assert int(result[b"proto"]) == 3
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2])
+    def test_sync_allow_opt_in_to_resp2_protocol(self, glide_sync_client: GlideSync):
+        """When configured for RESP2, HELLO must report protocol version 2."""
+        result = cast(Dict[bytes, bytes], glide_sync_client.custom_command(["HELLO"]))
+
+        assert int(result[b"proto"]) == 2
+
+    # Testing the inflight_requests_limit parameter in glide. Sending the allowed amount + 1 of requests
+    # to glide, using blocking commands, and checking the N+1 request returns immediately with error.
+    @pytest.mark.parametrize("cluster_mode", [False, True])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    @pytest.mark.parametrize("inflight_requests_limit", [5, 100, 1500])
+    def test_sync_inflight_request_limit(
+        self, cluster_mode, protocol, inflight_requests_limit, request
+    ):
+        # NOTE(review): this body is a broken copy of the async test and cannot
+        # work as written:
+        #  - create_client() is async and is not awaited, so test_sync_client is
+        #    a coroutine (should be create_sync_client());
+        #  - the sync blpop() returns a value, not a coroutine, so
+        #    asyncio.create_task(coro) raises TypeError — and blpop([key], 0)
+        #    would block this thread forever before that;
+        #  - asyncio.wait(...) is never awaited, so `done`/`pending` are not
+        #    sets of finished tasks;
+        #  - the bare `task` inside pytest.raises executes nothing, so the
+        #    RequestError assertion can never pass.
+        # A correct sync version needs worker threads plus a way to unblock the
+        # pending BLPOPs (e.g. pushing to key1) — rewrite required.
+        key1 = f"{{nonexistinglist}}:1-{get_random_string(10)}"
+        test_sync_client = create_client(
+            request=request,
+            protocol=protocol,
+            cluster_mode=cluster_mode,
+            inflight_requests_limit=inflight_requests_limit,
+        )
+
+        tasks = []
+        for i in range(inflight_requests_limit + 1):
+            coro = test_sync_client.blpop([key1], 0)
+            task = asyncio.create_task(coro)
+            tasks.append(task)
+
+        done, pending = asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
+
+        for task in done:
+            with pytest.raises(RequestError) as e:
+                task
+            assert "maximum inflight requests" in str(e)
+
+        for task in pending:
+            task.cancel()
+
+        test_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_conditional_set(self, glide_sync_client: GlideSync):
+        """SET NX/XX semantics: XX fails on a missing key, NX succeeds once and
+        then refuses to overwrite."""
+        key = get_random_string(10)
+        value = get_random_string(10)
+        res = glide_sync_client.set(
+            key, value, conditional_set=ConditionalChange.ONLY_IF_EXISTS
+        )
+        assert res is None
+        res = glide_sync_client.set(
+            key, value, conditional_set=ConditionalChange.ONLY_IF_DOES_NOT_EXIST
+        )
+        assert res == OK
+        assert glide_sync_client.get(key) == value.encode()
+        res = glide_sync_client.set(
+            key, "foobar", conditional_set=ConditionalChange.ONLY_IF_DOES_NOT_EXIST
+        )
+        assert res is None
+        assert glide_sync_client.get(key) == value.encode()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_set_return_old_value(self, glide_sync_client: GlideSync):
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ # TODO: change it to pytest fixture after we'll implement a sync client
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ key = get_random_string(10)
+ value = get_random_string(10)
+ res = glide_sync_client.set(key, value)
+ assert res == OK
+ assert glide_sync_client.get(key) == value.encode()
+ new_value = get_random_string(10)
+ res = glide_sync_client.set(key, new_value, return_old_value=True)
+ assert res == value.encode()
+ assert glide_sync_client.get(key) == new_value.encode()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_custom_command_single_arg(self, glide_sync_client: GlideSync):
+        """custom_command with a one-element argument list (PING)."""
+        # Test single arg command
+        res = glide_sync_client.custom_command(["PING"])
+        assert res == b"PONG"
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_custom_command_multi_arg(self, glide_sync_client: GlideSync):
+        """custom_command with multiple string arguments (CLIENT LIST)."""
+        # Test multi args command
+        client_list = glide_sync_client.custom_command(
+            ["CLIENT", "LIST", "TYPE", "NORMAL"]
+        )
+        # Cluster responses may be a list of per-node replies.
+        assert isinstance(client_list, (bytes, list))
+        res = get_first_result(client_list)
+        assert res is not None
+        assert b"id" in res
+        assert b"cmd=client" in res
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_custom_command_multi_arg_in_TEncodable(
+        self, glide_sync_client: GlideSync
+    ):
+        """custom_command must accept a mix of str and bytes arguments."""
+        # Test multi args command
+        client_list = glide_sync_client.custom_command(
+            ["CLIENT", b"LIST", "TYPE", b"NORMAL"]
+        )
+        assert isinstance(client_list, (bytes, list))
+        res = get_first_result(client_list)
+        assert res is not None
+        assert b"id" in res
+        assert b"cmd=client" in res
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_custom_command_lower_and_upper_case(
+ self, glide_sync_client: GlideSync
+ ):
+ # Test multi args command
+ client_list = glide_sync_client.custom_command(
+ ["CLIENT", "LIST", "TYPE", "NORMAL"]
+ )
+ assert isinstance(client_list, (bytes, list))
+ res = get_first_result(client_list)
+ assert res is not None
+ assert b"id" in res
+ assert b"cmd=client" in res
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_request_error_raises_exception(self, glide_sync_client: GlideSync):
+        """A type-mismatched command (HSET on a string key) raises RequestError."""
+        key = get_random_string(10)
+        value = get_random_string(10)
+        glide_sync_client.set(key, value)
+        with pytest.raises(RequestError) as e:
+            glide_sync_client.custom_command(["HSET", key, "1", "bar"])
+        assert "WRONGTYPE" in str(e)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_info_server_replication(self, glide_sync_client: GlideSync):
+        """INFO with explicit sections returns only those sections."""
+        info_res = get_first_result(glide_sync_client.info([InfoSection.SERVER]))
+        info = info_res.decode()
+        assert "# Server" in info
+        info = get_first_result(
+            glide_sync_client.info([InfoSection.REPLICATION])
+        ).decode()
+        assert "# Replication" in info
+        # A section that was not requested must not appear.
+        assert "# Errorstats" not in info
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_info_default(self, glide_sync_client: GlideSync):
+        """Default INFO: one reply per primary in cluster mode, containing Memory."""
+        # NOTE(review): GlideSync may not be a subclass of the async
+        # GlideClusterClient, in which case this is always False and the
+        # cluster branch never runs — confirm (prefer the parametrized
+        # cluster_mode instead).
+        cluster_mode = isinstance(glide_sync_client, GlideClusterClient)
+        info_result = glide_sync_client.info()
+        if cluster_mode:
+            cluster_nodes = glide_sync_client.custom_command(["CLUSTER", "NODES"])
+            assert isinstance(cluster_nodes, (bytes, list))
+            cluster_nodes = get_first_result(cluster_nodes)
+            # One INFO reply is expected per master node.
+            expected_num_of_results = cluster_nodes.count(b"master")
+            assert len(info_result) == expected_num_of_results
+        info_result = get_first_result(info_result)
+        assert b"# Memory" in info_result
+
+    @pytest.mark.parametrize("cluster_mode", [False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_select(self, glide_sync_client: GlideClient):
+        """SELECT switches logical databases; keys are database-scoped."""
+        assert glide_sync_client.select(0) == OK
+        key = get_random_string(10)
+        value = get_random_string(10)
+        assert glide_sync_client.set(key, value) == OK
+        assert glide_sync_client.get(key) == value.encode()
+        assert glide_sync_client.select(1) == OK
+        assert glide_sync_client.get(key) is None
+        assert glide_sync_client.select(0) == OK
+        assert glide_sync_client.get(key) == value.encode()
+
+    @pytest.mark.parametrize("cluster_mode", [False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_move(self, glide_sync_client: GlideClient):
+        """MOVE transfers a key between databases; invalid db index raises."""
+        key = get_random_string(10)
+        value = get_random_string(10)
+
+        assert glide_sync_client.select(0) == OK
+        # Moving a non-existent key reports False.
+        assert glide_sync_client.move(key, 1) is False
+
+        assert glide_sync_client.set(key, value) == OK
+        assert glide_sync_client.get(key) == value.encode()
+
+        assert glide_sync_client.move(key, 1) is True
+        assert glide_sync_client.get(key) is None
+        assert glide_sync_client.select(1) == OK
+        assert glide_sync_client.get(key) == value.encode()
+
+        with pytest.raises(RequestError) as e:
+            glide_sync_client.move(key, -1)
+
+    @pytest.mark.parametrize("cluster_mode", [False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_move_with_bytes(self, glide_sync_client: GlideClient):
+        """MOVE must also accept the key as bytes."""
+        key = get_random_string(10)
+        value = get_random_string(10)
+
+        assert glide_sync_client.select(0) == OK
+
+        assert glide_sync_client.set(key, value) == OK
+        assert glide_sync_client.get(key.encode()) == value.encode()
+
+        assert glide_sync_client.move(key.encode(), 1) is True
+        assert glide_sync_client.get(key) is None
+        assert glide_sync_client.get(key.encode()) is None
+        assert glide_sync_client.select(1) == OK
+        assert glide_sync_client.get(key) == value.encode()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_delete(self, glide_sync_client: GlideSync):
+        """DEL returns the number of keys actually removed."""
+        keys = [get_random_string(10), get_random_string(10), get_random_string(10)]
+        value = get_random_string(10)
+        value_encoded = value.encode()
+        [glide_sync_client.set(key, value) for key in keys]
+        assert glide_sync_client.get(keys[0]) == value_encoded
+        assert glide_sync_client.get(keys[1]) == value_encoded
+        assert glide_sync_client.get(keys[2]) == value_encoded
+        # Include one key that does not exist: only the 3 real ones count.
+        delete_keys = keys + [get_random_string(10)]
+        assert glide_sync_client.delete(delete_keys) == 3
+        assert glide_sync_client.delete(keys) == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_getdel(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ value = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ list_key = get_random_string(10)
+ assert glide_sync_client.set(key, value) == "OK"
+
+ # Retrieve and delete existing key
+ assert glide_sync_client.getdel(key) == value.encode()
+ assert glide_sync_client.get(key) is None
+
+ # Try to get and delete a non-existing key
+ assert glide_sync_client.getdel(non_existing_key) is None
+
+ assert glide_sync_client.lpush(list_key, [value]) == 1
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.getdel(list_key)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_getrange(self, glide_sync_client: GlideSync):
+        """GETRANGE with positive, negative, out-of-range and inverted indices.
+
+        Server ranges are inclusive on both ends, hence slices like [:4] for
+        getrange(key, 0, 3).
+        """
+        key = get_random_string(16)
+        value = get_random_string(10)
+        value_encoded = value.encode()
+        non_string_key = get_random_string(10)
+
+        assert glide_sync_client.set(key, value) == OK
+        assert glide_sync_client.getrange(key, 0, 3) == value_encoded[:4]
+        assert glide_sync_client.getrange(key, -3, -1) == value_encoded[-3:]
+        assert glide_sync_client.getrange(key.encode(), -3, -1) == value_encoded[-3:]
+        assert glide_sync_client.getrange(key, 0, -1) == value_encoded
+
+        # out of range
+        assert glide_sync_client.getrange(key, 10, 100) == value_encoded[10:]
+        assert glide_sync_client.getrange(key, -200, -3) == value_encoded[-200:-2]
+        assert glide_sync_client.getrange(key, 100, 200) == b""
+
+        # incorrect range
+        assert glide_sync_client.getrange(key, -1, -3) == b""
+
+        # Negative indices past the start clamp to the first character.
+        assert glide_sync_client.getrange(key, -200, -100) == value[0].encode()
+
+        # Missing key yields an empty string.
+        assert glide_sync_client.getrange(non_string_key, 0, -1) == b""
+
+        # non-string key
+        assert glide_sync_client.lpush(non_string_key, ["_"]) == 1
+        with pytest.raises(RequestError):
+            glide_sync_client.getrange(non_string_key, 0, -1)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_config_reset_stat(self, glide_sync_client: GlideSync):
+        """CONFIG RESETSTAT clears the per-command statistics."""
+        # we execute set and info so the commandstats will show `cmdstat_set::calls` greater than 1
+        # after the configResetStat call we initiate an info command and the the commandstats won't contain `cmdstat_set`.
+        glide_sync_client.set("foo", "bar")
+        info_stats = str(glide_sync_client.info([InfoSection.COMMAND_STATS]))
+
+        assert "cmdstat_set" in info_stats
+
+        assert glide_sync_client.config_resetstat() == OK
+        info_stats = str(glide_sync_client.info([InfoSection.COMMAND_STATS]))
+
+        # 1 stands for the second info command
+        assert "cmdstat_set" not in info_stats
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_config_rewrite(self, glide_sync_client: GlideSync):
+        """CONFIG REWRITE succeeds with a config file, errors without one."""
+        info_server = parse_info_response(
+            get_first_result(glide_sync_client.info([InfoSection.SERVER]))
+        )
+        if len(info_server["config_file"]) > 0:
+            assert glide_sync_client.config_rewrite() == OK
+        else:
+            # We expect Valkey to return an error since the test cluster doesn't use valkey.conf file
+            with pytest.raises(RequestError) as e:
+                glide_sync_client.config_rewrite()
+            assert "The server is running without a config file" in str(e)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_client_id(self, glide_sync_client: GlideSync):
+        """CLIENT ID returns a positive integer connection id."""
+        client_id = glide_sync_client.client_id()
+        assert type(client_id) is int
+        assert client_id > 0
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_incr_commands_existing_key(self, glide_sync_client: GlideSync):
+        """INCR / INCRBY / INCRBYFLOAT on an existing numeric key."""
+        key = get_random_string(10)
+        assert glide_sync_client.set(key, "10") == OK
+        assert glide_sync_client.incr(key) == 11
+        assert glide_sync_client.get(key) == b"11"
+        assert glide_sync_client.incrby(key, 4) == 15
+        assert glide_sync_client.get(key) == b"15"
+        assert glide_sync_client.incrbyfloat(key, 5.5) == 20.5
+        assert glide_sync_client.get(key) == b"20.5"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_incr_commands_non_existing_key(self, glide_sync_client: GlideSync):
+ # Increment commands treat a missing key as 0 and create it.
+ key = get_random_string(10)
+ key2 = get_random_string(10)
+ key3 = get_random_string(10)
+
+ assert glide_sync_client.get(key) is None
+ assert glide_sync_client.incr(key) == 1
+ assert glide_sync_client.get(key) == b"1"
+
+ assert glide_sync_client.get(key2) is None
+ assert glide_sync_client.incrby(key2, 3) == 3
+ assert glide_sync_client.get(key2) == b"3"
+
+ assert glide_sync_client.get(key3) is None
+ assert glide_sync_client.incrbyfloat(key3, 0.5) == 0.5
+ assert glide_sync_client.get(key3) == b"0.5"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_incr_commands_with_str_value(self, glide_sync_client: GlideSync):
+ # Increment commands raise a RequestError when the value is not numeric.
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.incr(key)
+
+ assert "value is not an integer" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.incrby(key, 3)
+
+ assert "value is not an integer" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.incrbyfloat(key, 3.5)
+ assert "value is not a valid float" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_client_getname(self, glide_sync_client: GlideSync):
+ # CLIENT GETNAME is None until a name is set via CLIENT SETNAME.
+ assert glide_sync_client.client_getname() is None
+ assert (
+ glide_sync_client.custom_command(["CLIENT", "SETNAME", "GlideConnection"])
+ == OK
+ )
+ assert glide_sync_client.client_getname() == b"GlideConnection"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_mset_mget(self, glide_sync_client: GlideSync):
+ # MGET returns values in request order, with None for missing keys.
+ keys = [get_random_string(10), get_random_string(10), get_random_string(10)]
+ non_existing_key = get_random_string(10)
+ key_value_pairs = {key: value for key, value in zip(keys, keys)}
+
+ assert glide_sync_client.mset(key_value_pairs) == OK
+
+ # Add the non-existing key
+ keys.append(non_existing_key)
+ mget_res = glide_sync_client.mget(keys)
+ # Replace the missing key with None so `keys` mirrors the expected MGET reply.
+ keys[-1] = None
+ assert mget_res == [key.encode() if key is not None else key for key in keys]
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_touch(self, glide_sync_client: GlideSync):
+ # TOUCH returns the number of keys that actually exist.
+ keys = [get_random_string(10), get_random_string(10)]
+ key_value_pairs = {key: value for key, value in zip(keys, keys)}
+
+ assert glide_sync_client.mset(key_value_pairs) == OK
+ assert glide_sync_client.touch(keys) == 2
+
+ # 2 existing keys, one non-existing
+ assert glide_sync_client.touch([*keys, get_random_string(3)]) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_msetnx(self, glide_sync_client: GlideSync):
+ # MSETNX is all-or-nothing: it fails without side effects if any key exists.
+ # Hash tags ({key}) force all keys into the same slot for cluster mode.
+ key1 = f"{{key}}-1{get_random_string(5)}"
+ key2 = f"{{key}}-2{get_random_string(5)}"
+ key3 = f"{{key}}-3{get_random_string(5)}"
+ non_existing = get_random_string(5)
+ value = get_random_string(5)
+ value_encoded = value.encode()
+ key_value_map1: Mapping[TEncodable, TEncodable] = {key1: value, key2: value}
+ key_value_map2: Mapping[TEncodable, TEncodable] = {
+ key2: get_random_string(5),
+ key3: value,
+ }
+
+ assert glide_sync_client.msetnx(key_value_map1) is True
+ mget_res = glide_sync_client.mget([key1, key2, non_existing])
+ assert mget_res == [value_encoded, value_encoded, None]
+
+ # key2 already exists, so nothing in map2 is written (not even key3).
+ assert glide_sync_client.msetnx(key_value_map2) is False
+ assert glide_sync_client.get(key3) is None
+ assert glide_sync_client.get(key2) == value_encoded
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_ping(self, glide_sync_client: GlideSync):
+ # PING echoes PONG by default, or the provided message.
+ assert glide_sync_client.ping() == b"PONG"
+ assert glide_sync_client.ping("HELLO") == b"HELLO"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_config_get_set(self, glide_sync_client: GlideSync):
+ # Round-trips CONFIG SET/GET for `timeout`, restoring the original value afterwards.
+ previous_timeout = glide_sync_client.config_get(["timeout"])
+ assert glide_sync_client.config_set({"timeout": "1000"}) == OK
+ assert glide_sync_client.config_get(["timeout"]) == {b"timeout": b"1000"}
+ # revert changes to previous timeout
+ previous_timeout_decoded = convert_bytes_to_string_object(previous_timeout)
+ assert isinstance(previous_timeout_decoded, dict)
+ assert isinstance(previous_timeout_decoded["timeout"], str)
+ assert (
+ glide_sync_client.config_set(
+ {"timeout": previous_timeout_decoded["timeout"]}
+ )
+ == OK
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_decr_decrby_existing_key(self, glide_sync_client: GlideSync):
+ # DECR / DECRBY on a key holding a numeric string.
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "10") == OK
+ assert glide_sync_client.decr(key) == 9
+ assert glide_sync_client.get(key) == b"9"
+ assert glide_sync_client.decrby(key, 4) == 5
+ assert glide_sync_client.get(key) == b"5"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_decr_decrby_non_existing_key(self, glide_sync_client: GlideSync):
+ # DECR / DECRBY treat a missing key as 0 and create it.
+ key = get_random_string(10)
+ key2 = get_random_string(10)
+
+ assert glide_sync_client.get(key) is None
+ assert glide_sync_client.decr(key) == -1
+ assert glide_sync_client.get(key) == b"-1"
+
+ assert glide_sync_client.get(key2) is None
+ assert glide_sync_client.decrby(key2, 3) == -3
+ assert glide_sync_client.get(key2) == b"-3"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_decr_with_str_value(self, glide_sync_client: GlideSync):
+ # DECR / DECRBY raise a RequestError when the value is not an integer.
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.decr(key)
+
+ assert "value is not an integer" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.decrby(key, 3)
+
+ assert "value is not an integer" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_setrange(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+
+ # test new key and existing key
+ assert glide_sync_client.setrange(key1, 0, "Hello World") == 11
+ assert glide_sync_client.setrange(key1, 6, "GLIDE") == 11
+
+ # offset > len
+ assert glide_sync_client.setrange(key1, 15, "GLIDE") == 20
+
+ # negative offset
+ with pytest.raises(RequestError):
+ assert glide_sync_client.setrange(key1, -1, "GLIDE")
+
+ # non-string key throws RequestError
+ assert glide_sync_client.lpush(key2, ["_"]) == 1
+ with pytest.raises(RequestError):
+ assert glide_sync_client.setrange(key2, 0, "_")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hset_hget_hgetall(self, glide_sync_client: GlideSync):
+ # HSET multiple fields, then read them back via HGET and HGETALL.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hget(key, field) == b"value"
+ assert glide_sync_client.hget(key, field2) == b"value2"
+ assert glide_sync_client.hget(key, "non_existing_field") is None
+
+ hgetall_map = glide_sync_client.hgetall(key)
+ expected_map = {
+ field.encode(): b"value",
+ field2.encode(): b"value2",
+ }
+ assert compare_maps(hgetall_map, expected_map) is True
+ assert glide_sync_client.hgetall("non_existing_field") == {}
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hdel(self, glide_sync_client: GlideSync):
+ # HDEL returns the number of fields that were actually removed.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field3 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2", field3: "value3"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 3
+ assert glide_sync_client.hdel(key, [field, field2]) == 2
+ assert glide_sync_client.hdel(key, ["nonExistingField"]) == 0
+ assert glide_sync_client.hdel("nonExistingKey", [field3]) == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hsetnx(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ field = get_random_string(5)
+
+ assert glide_sync_client.hsetnx(key, field, "value") == True
+ assert glide_sync_client.hsetnx(key, field, "new value") == False
+ assert glide_sync_client.hget(key, field) == b"value"
+ key = get_random_string(5)
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hsetnx(key, field, "value")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hmget(self, glide_sync_client: GlideSync):
+ # HMGET returns values in request order, None for missing fields/keys.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hmget(key, [field, "nonExistingField", field2]) == [
+ b"value",
+ None,
+ b"value2",
+ ]
+ assert glide_sync_client.hmget("nonExistingKey", [field, field2]) == [
+ None,
+ None,
+ ]
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hset_without_data(self, glide_sync_client: GlideSync):
+ # HSET with an empty field map is rejected by the server.
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.hset("key", {})
+
+ assert "wrong number of arguments" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hincrby_hincrbyfloat(self, glide_sync_client: GlideSync):
+ # HINCRBY / HINCRBYFLOAT on a numeric hash field.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field_value_map = {field: "10"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 1
+ assert glide_sync_client.hincrby(key, field, 1) == 11
+ assert glide_sync_client.hincrby(key, field, 4) == 15
+ assert glide_sync_client.hincrbyfloat(key, field, 1.5) == 16.5
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hincrby_non_existing_key_field(self, glide_sync_client: GlideSync):
+ # Missing keys/fields are treated as 0 by HINCRBY / HINCRBYFLOAT.
+ key = get_random_string(10)
+ key2 = get_random_string(10)
+ field = get_random_string(5)
+ field_value_map = {field: "10"}
+
+ assert glide_sync_client.hincrby("nonExistingKey", field, 1) == 1
+ assert glide_sync_client.hset(key, field_value_map) == 1
+ assert glide_sync_client.hincrby(key, "nonExistingField", 2) == 2
+ assert glide_sync_client.hset(key2, field_value_map) == 1
+ assert glide_sync_client.hincrbyfloat(key2, "nonExistingField", -0.5) == -0.5
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hincrby_invalid_value(self, glide_sync_client: GlideSync):
+ # HINCRBY / HINCRBYFLOAT raise a RequestError on non-numeric field values.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field_value_map = {field: "value"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 1
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.hincrby(key, field, 2)
+ assert "hash value is not an integer" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.hincrbyfloat(key, field, 1.5)
+ assert "hash value is not a float" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hexist(self, glide_sync_client: GlideSync):
+ # HEXISTS is truthy only for an existing field on an existing hash.
+ key = get_random_string(10)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hexists(key, field)
+ assert not glide_sync_client.hexists(key, "nonExistingField")
+ assert not glide_sync_client.hexists("nonExistingKey", field2)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hlen(self, glide_sync_client: GlideSync):
+ # HLEN tracks the field count; 0 for a missing key; error on a non-hash key.
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hlen(key) == 2
+ assert glide_sync_client.hdel(key, [field]) == 1
+ assert glide_sync_client.hlen(key) == 1
+ assert glide_sync_client.hlen("non_existing_hash") == 0
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hlen(key2)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hvals(self, glide_sync_client: GlideSync):
+ # HVALS lists values; [] for a missing key; error on a non-hash key.
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hvals(key) == [b"value", b"value2"]
+ assert glide_sync_client.hdel(key, [field]) == 1
+ assert glide_sync_client.hvals(key) == [b"value2"]
+ assert glide_sync_client.hvals("non_existing_key") == []
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hvals(key2)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hkeys(self, glide_sync_client: GlideSync):
+ # HKEYS lists field names; [] for a missing key; error on a non-hash key.
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hkeys(key) == [
+ field.encode(),
+ field2.encode(),
+ ]
+ assert glide_sync_client.hdel(key, [field]) == 1
+ assert glide_sync_client.hkeys(key) == [field2.encode()]
+ assert glide_sync_client.hkeys("non_existing_key") == []
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hkeys(key2)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hrandfield(self, glide_sync_client: GlideSync):
+ # HRANDFIELD returns one of the hash's fields, or None for a missing key.
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ assert glide_sync_client.hrandfield(key) in [
+ field.encode(),
+ field2.encode(),
+ ]
+ assert glide_sync_client.hrandfield("non_existing_key") is None
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hrandfield(key2)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hrandfield_count(self, glide_sync_client: GlideSync):
+ # HRANDFIELD with count: positive count -> distinct fields (capped at hash
+ # size), negative count -> exactly |count| fields with possible repeats.
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ # Unique values are expected as count is positive
+ rand_fields = glide_sync_client.hrandfield_count(key, 4)
+ assert len(rand_fields) == 2
+ assert set(rand_fields) == {field.encode(), field2.encode()}
+
+ # Duplicate values are expected as count is negative
+ rand_fields = glide_sync_client.hrandfield_count(key, -4)
+ assert len(rand_fields) == 4
+ for rand_field in rand_fields:
+ assert rand_field in [field.encode(), field2.encode()]
+
+ assert glide_sync_client.hrandfield_count(key, 0) == []
+ assert glide_sync_client.hrandfield_count("non_existing_key", 4) == []
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hrandfield_count(key2, 5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hrandfield_withvalues(self, glide_sync_client: GlideSync):
+ # HRANDFIELD WITHVALUES returns [field, value] pairs; same count semantics
+ # as hrandfield_count (positive -> distinct, negative -> repeats allowed).
+ key = get_random_string(10)
+ key2 = get_random_string(5)
+ field = get_random_string(5)
+ field2 = get_random_string(5)
+ field_value_map = {field: "value", field2: "value2"}
+
+ assert glide_sync_client.hset(key, field_value_map) == 2
+ # Unique values are expected as count is positive
+ rand_fields_with_values = glide_sync_client.hrandfield_withvalues(key, 4)
+ assert len(rand_fields_with_values) == 2
+ for field_with_value in rand_fields_with_values:
+ assert field_with_value in [
+ [field.encode(), b"value"],
+ [field2.encode(), b"value2"],
+ ]
+
+ # Duplicate values are expected as count is negative
+ rand_fields_with_values = glide_sync_client.hrandfield_withvalues(key, -4)
+ assert len(rand_fields_with_values) == 4
+ for field_with_value in rand_fields_with_values:
+ assert field_with_value in [
+ [field.encode(), b"value"],
+ [field2.encode(), b"value2"],
+ ]
+
+ assert glide_sync_client.hrandfield_withvalues(key, 0) == []
+ assert glide_sync_client.hrandfield_withvalues("non_existing_key", 4) == []
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hrandfield_withvalues(key2, 5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hstrlen(self, glide_sync_client: GlideSync):
+ # HSTRLEN: value length of a field, 0 for missing field/key, error on non-hash.
+ key = get_random_string(10)
+
+ assert glide_sync_client.hstrlen(key, "field") == 0
+ assert glide_sync_client.hset(key, {"field": "value"}) == 1
+ assert glide_sync_client.hstrlen(key, "field") == 5
+
+ assert glide_sync_client.hstrlen(key, "field2") == 0
+
+ # Overwrite the key with a plain string so HSTRLEN now fails.
+ glide_sync_client.set(key, "value")
+ with pytest.raises(RequestError):
+ glide_sync_client.hstrlen(key, "field")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lpush_lpop_lrange(self, glide_sync_client: GlideSync):
+ # LPUSH prepends, so the list reads back in reverse push order.
+ key = get_random_string(10)
+ value_list: List[TEncodable] = ["value4", "value3", "value2", "value1"]
+
+ assert glide_sync_client.lpush(key, value_list) == 4
+ assert glide_sync_client.lpop(key) == cast(str, value_list[-1]).encode()
+ assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
+ value_list[-2::-1]
+ )
+ assert glide_sync_client.lpop_count(key, 2) == convert_string_to_bytes_object(
+ value_list[-2:0:-1]
+ )
+ assert glide_sync_client.lrange("non_existing_key", 0, -1) == []
+ assert glide_sync_client.lpop("non_existing_key") is None
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lpush_lpop_lrange_wrong_type_raise_error(
+ self, glide_sync_client: GlideSync
+ ):
+ # List commands against a string key raise WRONGTYPE RequestErrors.
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.lpush(key, ["bar"])
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.lpop(key)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.lrange(key, 0, -1)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lpushx(self, glide_sync_client: GlideSync):
+ # LPUSHX only pushes when the list already exists.
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+
+ # new key
+ assert glide_sync_client.lpushx(key1, ["1"]) == 0
+ assert glide_sync_client.lrange(key1, 0, -1) == []
+ # existing key
+ assert glide_sync_client.lpush(key1, ["0"]) == 1
+ assert glide_sync_client.lpushx(key1, ["1", "2", "3"]) == 4
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["3", "2", "1", "0"]
+ )
+ # key exists, but not a list
+ assert glide_sync_client.set(key2, "bar") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.lpushx(key2, ["_"])
+ # incorrect arguments
+ with pytest.raises(RequestError):
+ glide_sync_client.lpushx(key1, [])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_blpop(self, glide_sync_client: GlideSync):
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ value1 = "value1"
+ value2 = "value2"
+ value_list: List[TEncodable] = [value1, value2]
+
+ assert glide_sync_client.lpush(key1, value_list) == 2
+ assert glide_sync_client.blpop(
+ [key1, key2], 0.5
+ ) == convert_string_to_bytes_object([key1, value2])
+ # ensure that command doesn't time out even if timeout > request timeout (250ms by default)
+ assert glide_sync_client.blpop(["non_existent_key"], 0.5) is None
+
+ # key exists, but not a list
+ assert glide_sync_client.set("foo", "bar")
+ with pytest.raises(RequestError):
+ glide_sync_client.blpop(["foo"], 0.001)
+
+ def endless_blpop_call():
+ glide_sync_client.blpop(["non_existent_key"], 0)
+
+ # blpop is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_blpop_call(), timeout=3)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lmpop(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ key3 = f"{{test}}-3-f{get_random_string(10)}"
+
+ # Initialize the lists
+ assert glide_sync_client.lpush(key1, ["3", "2", "1"]) == 3
+ assert glide_sync_client.lpush(key2, ["6", "5", "4"]) == 3
+
+ # Pop from LEFT
+ result = glide_sync_client.lmpop([key1, key2], ListDirection.LEFT, 2)
+ expected_result = {key1: ["1", "2"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Pop from RIGHT
+ result = glide_sync_client.lmpop([key2, key1], ListDirection.RIGHT, 2)
+ expected_result = {key2: ["6", "5"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Pop without count (default is 1)
+ result = glide_sync_client.lmpop([key1, key2], ListDirection.LEFT)
+ expected_result = {key1: ["3"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Non-existing key
+ result = glide_sync_client.lmpop([key3], ListDirection.LEFT, 1)
+ assert result is None
+
+ # Non-list key
+ assert glide_sync_client.set(key3, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.lmpop([key3], ListDirection.LEFT, 1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_blmpop(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ key3 = f"{{test}}-3-f{get_random_string(10)}"
+ key4 = f"{{test}}-4-f{get_random_string(10)}"
+
+ # Initialize the lists
+ assert glide_sync_client.lpush(key1, ["3", "2", "1"]) == 3
+ assert glide_sync_client.lpush(key2, ["6", "5", "4"]) == 3
+
+ # Pop from LEFT with blocking
+ result = glide_sync_client.blmpop([key1, key2], ListDirection.LEFT, 0.1, 2)
+ expected_result = {key1: ["1", "2"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Pop from RIGHT with blocking
+ result = glide_sync_client.blmpop([key2, key1], ListDirection.RIGHT, 0.1, 2)
+ expected_result = {key2: ["6", "5"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Pop without count (default is 1)
+ result = glide_sync_client.blmpop([key1, key2], ListDirection.LEFT, 0.1)
+ expected_result = {key1: ["3"]}
+ assert compare_maps(result, expected_result) is True
+
+ # Non-existing key with blocking
+ result = glide_sync_client.blmpop([key3], ListDirection.LEFT, 0.1, 1)
+ assert result is None
+
+ # Non-list key with blocking
+ assert glide_sync_client.set(key4, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.blmpop([key4], ListDirection.LEFT, 0.1, 1)
+
+ # BLMPOP is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ def endless_blmpop_call():
+ glide_sync_client.blmpop([key3], ListDirection.LEFT, 0, 1)
+
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_blmpop_call(), timeout=3)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lindex(self, glide_sync_client: GlideSync):
+ # LINDEX returns the element at index, None when out of range or key missing.
+ key = get_random_string(10)
+ value_list = [get_random_string(5), get_random_string(5)]
+ assert glide_sync_client.lpush(key, value_list) == 2
+ assert glide_sync_client.lindex(key, 0) == value_list[1].encode()
+ assert glide_sync_client.lindex(key, 1) == value_list[0].encode()
+ assert glide_sync_client.lindex(key, 3) is None
+ assert glide_sync_client.lindex("non_existing_key", 0) is None
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_rpush_rpop(self, glide_sync_client: GlideSync):
+ # RPUSH appends; RPOP removes from the tail (last pushed first).
+ key = get_random_string(10)
+ value_list: List[TEncodable] = ["value4", "value3", "value2", "value1"]
+
+ assert glide_sync_client.rpush(key, value_list) == 4
+ assert glide_sync_client.rpop(key) == cast(str, value_list[-1]).encode()
+
+ assert glide_sync_client.rpop_count(key, 2) == convert_string_to_bytes_object(
+ value_list[-2:0:-1]
+ )
+ assert glide_sync_client.rpop("non_existing_key") is None
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_rpush_rpop_wrong_type_raise_error(self, glide_sync_client: GlideSync):
+ # RPUSH / RPOP against a string key raise WRONGTYPE RequestErrors.
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.rpush(key, ["bar"])
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.rpop(key)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_rpushx(self, glide_sync_client: GlideSync):
+ # RPUSHX only pushes when the list already exists.
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+
+ # new key
+ assert glide_sync_client.rpushx(key1, ["1"]) == 0
+ assert glide_sync_client.lrange(key1, 0, -1) == []
+ # existing key
+ assert glide_sync_client.rpush(key1, ["0"]) == 1
+ assert glide_sync_client.rpushx(key1, ["1", "2", "3"]) == 4
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["0", "1", "2", "3"]
+ )
+ # key existing, but it is not a list
+ assert glide_sync_client.set(key2, "bar") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.rpushx(key2, ["_"])
+ # incorrect arguments
+ with pytest.raises(RequestError):
+ glide_sync_client.rpushx(key2, [])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_brpop(self, glide_sync_client: GlideSync):
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ value1 = "value1"
+ value2 = "value2"
+ value_list: List[TEncodable] = [value1, value2]
+
+ assert glide_sync_client.lpush(key1, value_list) == 2
+ # ensure that command doesn't time out even if timeout > request timeout (250ms by default)
+ assert glide_sync_client.brpop(
+ [key1, key2], 0.5
+ ) == convert_string_to_bytes_object([key1, value1])
+
+ assert glide_sync_client.brpop(["non_existent_key"], 0.5) is None
+
+ # key exists, but not a list
+ assert glide_sync_client.set("foo", "bar")
+ with pytest.raises(RequestError):
+ glide_sync_client.brpop(["foo"], 0.001)
+
+ def endless_brpop_call():
+ glide_sync_client.brpop(["non_existent_key"], 0)
+
+ # brpop is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_brpop_call(), timeout=3)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_linsert(self, glide_sync_client: GlideSync):
+ # LINSERT returns the new length, 0 for a missing key, -1 for a missing pivot.
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+
+ assert glide_sync_client.lpush(key1, ["4", "3", "2", "1"]) == 4
+ assert glide_sync_client.linsert(key1, InsertPosition.BEFORE, "2", "1.5") == 5
+ assert glide_sync_client.linsert(key1, InsertPosition.AFTER, "3", "3.5") == 6
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ [
+ "1",
+ "1.5",
+ "2",
+ "3",
+ "3.5",
+ "4",
+ ]
+ )
+
+ assert (
+ glide_sync_client.linsert(
+ "non_existing_key", InsertPosition.BEFORE, "pivot", "elem"
+ )
+ == 0
+ )
+ assert glide_sync_client.linsert(key1, InsertPosition.AFTER, "5", "6") == -1
+
+ # key exists, but it is not a list
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.linsert(key2, InsertPosition.AFTER, "p", "e")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lmove(self, glide_sync_client: GlideSync):
+ # LMOVE atomically pops from one side of the source list and pushes to one
+ # side of the destination. {SameSlot} hash tags keep both keys in one slot
+ # so the multi-key command works in cluster mode.
+ key1 = "{SameSlot}" + get_random_string(10)
+ key2 = "{SameSlot}" + get_random_string(10)
+
+ # Initialize the lists
+ assert glide_sync_client.lpush(key1, ["2", "1"]) == 2
+ assert glide_sync_client.lpush(key2, ["4", "3"]) == 2
+
+ # Move from LEFT to LEFT
+ assert (
+ glide_sync_client.lmove(key1, key2, ListDirection.LEFT, ListDirection.LEFT)
+ == b"1"
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2"]
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4"]
+ )
+
+ # Move from LEFT to RIGHT
+ assert (
+ glide_sync_client.lmove(
+ key1, key2, ListDirection.LEFT, ListDirection.RIGHT
+ )
+ == b"2"
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == []
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4", "2"]
+ )
+
+ # Move from RIGHT to LEFT - non-existing destination key
+ assert (
+ glide_sync_client.lmove(
+ key2, key1, ListDirection.RIGHT, ListDirection.LEFT
+ )
+ == b"2"
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4"]
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2"]
+ )
+
+ # Move from RIGHT to RIGHT
+ assert (
+ glide_sync_client.lmove(
+ key2, key1, ListDirection.RIGHT, ListDirection.RIGHT
+ )
+ == b"4"
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3"]
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2", "4"]
+ )
+
+ # Non-existing source key
+ assert (
+ glide_sync_client.lmove(
+ "{SameSlot}non_existing_key",
+ key1,
+ ListDirection.LEFT,
+ ListDirection.LEFT,
+ )
+ is None
+ )
+
+ # Non-list source key
+ key3 = get_random_string(10)
+ assert glide_sync_client.set(key3, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.lmove(key3, key1, ListDirection.LEFT, ListDirection.LEFT)
+
+ # Non-list destination key
+ with pytest.raises(RequestError):
+ glide_sync_client.lmove(key1, key3, ListDirection.LEFT, ListDirection.LEFT)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_blmove(self, glide_sync_client: GlideSync):
+ key1 = "{SameSlot}" + get_random_string(10)
+ key2 = "{SameSlot}" + get_random_string(10)
+
+ # Initialize the lists
+ assert glide_sync_client.lpush(key1, ["2", "1"]) == 2
+ assert glide_sync_client.lpush(key2, ["4", "3"]) == 2
+
+ # Move from LEFT to LEFT with blocking
+ assert (
+ glide_sync_client.blmove(
+ key1, key2, ListDirection.LEFT, ListDirection.LEFT, 0.1
+ )
+ == b"1"
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2"]
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4"]
+ )
+
+ # Move from LEFT to RIGHT with blocking
+ assert (
+ glide_sync_client.blmove(
+ key1, key2, ListDirection.LEFT, ListDirection.RIGHT, 0.1
+ )
+ == b"2"
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == []
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4", "2"]
+ )
+
+ # Move from RIGHT to LEFT non-existing destination with blocking
+ assert (
+ glide_sync_client.blmove(
+ key2, key1, ListDirection.RIGHT, ListDirection.LEFT, 0.1
+ )
+ == b"2"
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3", "4"]
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2"]
+ )
+
+ # Move from RIGHT to RIGHT with blocking
+ assert (
+ glide_sync_client.blmove(
+ key2, key1, ListDirection.RIGHT, ListDirection.RIGHT, 0.1
+ )
+ == b"4"
+ )
+ assert glide_sync_client.lrange(key2, 0, -1) == convert_string_to_bytes_object(
+ ["1", "3"]
+ )
+ assert glide_sync_client.lrange(key1, 0, -1) == convert_string_to_bytes_object(
+ ["2", "4"]
+ )
+
+ # Non-existing source key with blocking
+ assert (
+ glide_sync_client.blmove(
+ "{SameSlot}non_existing_key",
+ key1,
+ ListDirection.LEFT,
+ ListDirection.LEFT,
+ 0.1,
+ )
+ is None
+ )
+
+ # Non-list source key with blocking
+ key3 = get_random_string(10)
+ assert glide_sync_client.set(key3, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.blmove(
+ key3, key1, ListDirection.LEFT, ListDirection.LEFT, 0.1
+ )
+
+ # Non-list destination key with blocking
+ with pytest.raises(RequestError):
+ glide_sync_client.blmove(
+ key1, key3, ListDirection.LEFT, ListDirection.LEFT, 0.1
+ )
+
+ # BLMOVE is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ def endless_blmove_call():
+ glide_sync_client.blmove(
+ "{SameSlot}non_existing_key",
+ key2,
+ ListDirection.LEFT,
+ ListDirection.RIGHT,
+ 0,
+ )
+
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_blmove_call(), timeout=3)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lset(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ element = get_random_string(5)
+ values = [get_random_string(5) for _ in range(4)]
+
+ # key does not exist
+ with pytest.raises(RequestError):
+ glide_sync_client.lset("non_existing_key", 0, element)
+
+ # pushing elements to list
+ glide_sync_client.lpush(key, values) == 4
+
+ # index out of range
+ with pytest.raises(RequestError):
+ glide_sync_client.lset(key, 10, element)
+
+ # assert lset result
+ assert glide_sync_client.lset(key, 0, element) == OK
+
+ values = [element] + values[:-1][::-1]
+ assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
+ values
+ )
+
+ # assert lset with a negative index for the last element in the list
+ assert glide_sync_client.lset(key, -1, element) == OK
+
+ values[-1] = element
+ assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
+ values
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sadd_srem_smembers_scard(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ value_list: List[TEncodable] = ["member1", "member2", "member3", "member4"]
+
+ assert glide_sync_client.sadd(key, value_list) == 4
+ assert glide_sync_client.srem(key, ["member4", "nonExistingMember"]) == 1
+
+ assert set(glide_sync_client.smembers(key)) == set(
+ cast(list, convert_string_to_bytes_object(value_list[:3]))
+ )
+
+ assert glide_sync_client.srem(key, ["member1"]) == 1
+ assert glide_sync_client.scard(key) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sadd_srem_smembers_scard_non_existing_key(
+ self, glide_sync_client: GlideSync
+ ):
+ non_existing_key = get_random_string(10)
+ assert glide_sync_client.srem(non_existing_key, ["member"]) == 0
+ assert glide_sync_client.scard(non_existing_key) == 0
+ assert glide_sync_client.smembers(non_existing_key) == set()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sadd_srem_smembers_scard_wrong_type_raise_error(
+ self, glide_sync_client: GlideSync
+ ):
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.sadd(key, ["bar"])
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.srem(key, ["bar"])
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.scard(key)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.smembers(key)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sismember(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ member = get_random_string(5)
+ assert glide_sync_client.sadd(key, [member]) == 1
+ assert glide_sync_client.sismember(key, member)
+ assert not glide_sync_client.sismember(key, get_random_string(5))
+ assert not glide_sync_client.sismember("non_existing_key", member)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_spop(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ member = get_random_string(5)
+ assert glide_sync_client.sadd(key, [member]) == 1
+ assert glide_sync_client.spop(key) == member.encode()
+
+ member2 = get_random_string(5)
+ member3 = get_random_string(5)
+ assert glide_sync_client.sadd(key, [member, member2, member3]) == 3
+ assert glide_sync_client.spop_count(key, 4) == convert_string_to_bytes_object(
+ {member, member2, member3}
+ )
+
+ assert glide_sync_client.scard(key) == 0
+
+ assert glide_sync_client.spop("non_existing_key") == None
+ assert glide_sync_client.spop_count("non_existing_key", 3) == set()
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_smove(self, glide_sync_client: GlideSync):
        """SMOVE: move a member between sets, covering duplicate destination
        members, missing sources/destinations, missing members, and wrong-type
        keys. All keys share the {testKey} hash tag for cluster mode."""
        key1 = f"{{testKey}}:1-{get_random_string(10)}"
        key2 = f"{{testKey}}:2-{get_random_string(10)}"
        key3 = f"{{testKey}}:3-{get_random_string(10)}"
        string_key = f"{{testKey}}:4-{get_random_string(10)}"
        non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"

        assert glide_sync_client.sadd(key1, ["1", "2", "3"]) == 3
        assert glide_sync_client.sadd(key2, ["2", "3"]) == 2

        # move an element
        assert glide_sync_client.smove(key1, key2, "1") is True
        assert glide_sync_client.smembers(key1) == convert_string_to_bytes_object(
            {"2", "3"}
        )
        assert glide_sync_client.smembers(key2) == convert_string_to_bytes_object(
            {"1", "2", "3"}
        )

        # moved element already exists in the destination set
        assert glide_sync_client.smove(key2, key1, "2") is True
        assert glide_sync_client.smembers(key1) == convert_string_to_bytes_object(
            {"2", "3"}
        )
        assert glide_sync_client.smembers(key2) == convert_string_to_bytes_object(
            {"1", "3"}
        )

        # attempt to move from a non-existing key
        assert glide_sync_client.smove(non_existing_key, key1, "4") is False
        assert glide_sync_client.smembers(key1) == convert_string_to_bytes_object(
            {"2", "3"}
        )

        # move to a new set
        assert glide_sync_client.smove(key1, key3, "2")
        assert glide_sync_client.smembers(key1) == {b"3"}
        assert glide_sync_client.smembers(key3) == {b"2"}

        # attempt to move a missing element
        assert glide_sync_client.smove(key1, key3, "42") is False
        assert glide_sync_client.smembers(key1) == {b"3"}
        assert glide_sync_client.smembers(key3) == {b"2"}

        # move missing element to missing key — destination must not be created
        assert glide_sync_client.smove(key1, non_existing_key, "42") is False
        assert glide_sync_client.smembers(key1) == {b"3"}
        assert glide_sync_client.type(non_existing_key) == b"none"

        # key exists, but it is not a set
        assert glide_sync_client.set(string_key, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.smove(string_key, key1, "_")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sunion(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:{get_random_string(10)}"
+ key2 = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:non_existing_key"
+ member1_list: List[TEncodable] = ["a", "b", "c"]
+ member2_list: List[TEncodable] = ["b", "c", "d", "e"]
+
+ assert glide_sync_client.sadd(key1, member1_list) == 3
+ assert glide_sync_client.sadd(key2, member2_list) == 4
+ assert glide_sync_client.sunion([key1, key2]) == {b"a", b"b", b"c", b"d", b"e"}
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.sunion([])
+
+ # non-existing key returns the set of existing keys
+ assert glide_sync_client.sunion(
+ [key1, non_existing_key]
+ ) == convert_string_to_bytes_object(set(cast(List[str], member1_list)))
+
+ # non-set key
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.sunion([key2])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sunionstore(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ key3 = f"{{testKey}}:3-{get_random_string(10)}"
+ key4 = f"{{testKey}}:4-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ assert glide_sync_client.sadd(key1, ["a", "b", "c"]) == 3
+ assert glide_sync_client.sadd(key2, ["c", "d", "e"]) == 3
+ assert glide_sync_client.sadd(key3, ["e", "f", "g"]) == 3
+
+ # store union in new key
+ assert glide_sync_client.sunionstore(key4, [key1, key2]) == 5
+ assert glide_sync_client.smembers(key4) == convert_string_to_bytes_object(
+ {"a", "b", "c", "d", "e"}
+ )
+
+ # overwrite existing set
+ assert glide_sync_client.sunionstore(key1, [key4, key2]) == 5
+ assert glide_sync_client.smembers(key1) == convert_string_to_bytes_object(
+ {"a", "b", "c", "d", "e"}
+ )
+
+ # overwrite one of the source keys
+ assert glide_sync_client.sunionstore(key2, [key4, key2]) == 5
+ assert glide_sync_client.smembers(key1) == convert_string_to_bytes_object(
+ {"a", "b", "c", "d", "e"}
+ )
+
+ # union with a non existing key
+ assert glide_sync_client.sunionstore(key2, [non_existing_key]) == 0
+ assert glide_sync_client.smembers(key2) == set()
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.sunionstore(key4, [string_key, key1])
+
+ # overwrite destination when destination is not a set
+ assert glide_sync_client.sunionstore(string_key, [key1, key3]) == 7
+ assert glide_sync_client.smembers(
+ string_key
+ ) == convert_string_to_bytes_object(
+ {
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ }
+ )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_sinter(self, glide_sync_client: GlideSync):
        """SINTER: set intersection, empty key-list rejection, missing keys, and
        wrong-type errors."""
        key1 = f"{{testKey}}:{get_random_string(10)}"
        key2 = f"{{testKey}}:{get_random_string(10)}"
        non_existing_key = f"{{testKey}}:non_existing_key"
        member1_list: List[TEncodable] = ["a", "b", "c"]
        member2_list: List[TEncodable] = ["c", "d", "e"]

        # positive test case
        assert glide_sync_client.sadd(key1, member1_list) == 3
        assert glide_sync_client.sadd(key2, member2_list) == 3
        assert glide_sync_client.sinter([key1, key2]) == {b"c"}

        # invalid argument - key list must not be empty
        with pytest.raises(RequestError):
            glide_sync_client.sinter([])

        # non-existing key returns empty set
        assert glide_sync_client.sinter([key1, non_existing_key]) == set()

        # non-set key
        assert glide_sync_client.set(key2, "value") == OK
        with pytest.raises(RequestError) as e:
            glide_sync_client.sinter([key2])
        assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sinterstore(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:{get_random_string(10)}"
+ key2 = f"{{testKey}}:{get_random_string(10)}"
+ key3 = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:non_existing_key"
+ member1_list: List[TEncodable] = ["a", "b", "c"]
+ member2_list: List[TEncodable] = ["c", "d", "e"]
+
+ assert glide_sync_client.sadd(key1, member1_list) == 3
+ assert glide_sync_client.sadd(key2, member2_list) == 3
+
+ # store in new key
+ assert glide_sync_client.sinterstore(key3, [key1, key2]) == 1
+ assert glide_sync_client.smembers(key3) == {b"c"}
+
+ # overwrite existing set, which is also a source set
+ assert glide_sync_client.sinterstore(key2, [key2, key3]) == 1
+ assert glide_sync_client.smembers(key2) == {b"c"}
+
+ # source set is the same as the existing set
+ assert glide_sync_client.sinterstore(key2, [key2]) == 1
+ assert glide_sync_client.smembers(key2) == {b"c"}
+
+ # intersection with non-existing key
+ assert glide_sync_client.sinterstore(key1, [key2, non_existing_key]) == 0
+ assert glide_sync_client.smembers(key1) == set()
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.sinterstore(key3, [])
+
+ # non-set key
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.sinterstore(key3, [string_key])
+
+ # overwrite non-set key
+ assert glide_sync_client.sinterstore(string_key, [key2]) == 1
+ assert glide_sync_client.smembers(string_key) == {b"c"}
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sintercard(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{testKey}}:{get_random_string(10)}"
+ key2 = f"{{testKey}}:{get_random_string(10)}"
+ key3 = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:non_existing_key"
+ member1_list: List[TEncodable] = ["a", "b", "c"]
+ member2_list: List[TEncodable] = ["b", "c", "d", "e"]
+ member3_list: List[TEncodable] = ["b", "c", "f", "g"]
+
+ assert glide_sync_client.sadd(key1, member1_list) == 3
+ assert glide_sync_client.sadd(key2, member2_list) == 4
+ assert glide_sync_client.sadd(key3, member3_list) == 4
+
+ # Basic intersection
+ assert (
+ glide_sync_client.sintercard([key1, key2]) == 2
+ ) # Intersection of key1 and key2 is {"b", "c"}
+
+ # Intersection with non-existing key
+ assert (
+ glide_sync_client.sintercard([key1, non_existing_key]) == 0
+ ) # No common elements
+
+ # Intersection with a single key
+ assert glide_sync_client.sintercard([key1]) == 3 # All elements in key1
+
+ # Intersection with limit
+ assert (
+ glide_sync_client.sintercard([key1, key2, key3], limit=1) == 1
+ ) # Stops early at limit
+
+ # Invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.sintercard([])
+
+ # Non-set key
+ assert glide_sync_client.set(string_key, "value") == "OK"
+ with pytest.raises(RequestError):
+ glide_sync_client.sintercard([string_key])
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_sdiff(self, glide_sync_client: GlideSync):
        """SDIFF: asymmetric set difference in both directions, missing keys,
        empty key-list rejection, and wrong-type errors."""
        key1 = f"{{testKey}}:1-{get_random_string(10)}"
        key2 = f"{{testKey}}:2-{get_random_string(10)}"
        string_key = f"{{testKey}}:4-{get_random_string(10)}"
        non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"

        assert glide_sync_client.sadd(key1, ["a", "b", "c"]) == 3
        assert glide_sync_client.sadd(key2, ["c", "d", "e"]) == 3

        # Difference is direction-dependent.
        assert glide_sync_client.sdiff([key1, key2]) == convert_string_to_bytes_object(
            {"a", "b"}
        )
        assert glide_sync_client.sdiff([key2, key1]) == convert_string_to_bytes_object(
            {"d", "e"}
        )

        # A missing key acts as an empty set on either side.
        assert glide_sync_client.sdiff(
            [key1, non_existing_key]
        ) == convert_string_to_bytes_object({"a", "b", "c"})
        assert glide_sync_client.sdiff([non_existing_key, key1]) == set()

        # invalid argument - key list must not be empty
        with pytest.raises(RequestError):
            glide_sync_client.sdiff([])

        # key exists, but it is not a set
        assert glide_sync_client.set(string_key, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.sdiff([string_key])
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_sdiffstore(self, glide_sync_client: GlideSync):
        """SDIFFSTORE: store a set difference into new, existing, source, and
        wrong-type destination keys, and with missing keys on either side."""
        key1 = f"{{testKey}}:1-{get_random_string(10)}"
        key2 = f"{{testKey}}:2-{get_random_string(10)}"
        key3 = f"{{testKey}}:3-{get_random_string(10)}"
        string_key = f"{{testKey}}:4-{get_random_string(10)}"
        non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"

        assert glide_sync_client.sadd(key1, ["a", "b", "c"]) == 3
        assert glide_sync_client.sadd(key2, ["c", "d", "e"]) == 3

        # Store diff in new key
        assert glide_sync_client.sdiffstore(key3, [key1, key2]) == 2
        assert glide_sync_client.smembers(key3) == convert_string_to_bytes_object(
            {"a", "b"}
        )

        # Overwrite existing set
        assert glide_sync_client.sdiffstore(key3, [key2, key1]) == 2
        assert glide_sync_client.smembers(key3) == convert_string_to_bytes_object(
            {"d", "e"}
        )

        # Overwrite one of the source sets
        assert glide_sync_client.sdiffstore(key3, [key2, key3]) == 1
        assert glide_sync_client.smembers(key3) == {b"c"}

        # Diff between non-empty set and empty set
        assert glide_sync_client.sdiffstore(key3, [key1, non_existing_key]) == 3
        assert glide_sync_client.smembers(key3) == convert_string_to_bytes_object(
            {"a", "b", "c"}
        )

        # Diff between empty set and non-empty set
        assert glide_sync_client.sdiffstore(key3, [non_existing_key, key1]) == 0
        assert glide_sync_client.smembers(key3) == set()

        # invalid argument - key list must not be empty
        with pytest.raises(RequestError):
            glide_sync_client.sdiffstore(key3, [])

        # source key exists, but it is not a set
        assert glide_sync_client.set(string_key, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.sdiffstore(key3, [string_key])

        # Overwrite a key holding a non-set value
        assert glide_sync_client.sdiffstore(string_key, [key1, key2]) == 2
        assert glide_sync_client.smembers(
            string_key
        ) == convert_string_to_bytes_object({"a", "b"})
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_smismember(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+
+ assert glide_sync_client.sadd(key1, ["one", "two"]) == 2
+ assert glide_sync_client.smismember(key1, ["two", "three"]) == [True, False]
+
+ assert glide_sync_client.smismember(non_existing_key, ["two"]) == [False]
+
+ # invalid argument - member list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.smismember(key1, [])
+
+ # source key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.smismember(string_key, ["two"])
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_ltrim(self, glide_sync_client: GlideSync):
        """LTRIM: trim to a range, start > end empties the list, a missing key
        still returns OK, and wrong-type keys raise."""
        key = get_random_string(10)
        value_list: List[TEncodable] = ["value4", "value3", "value2", "value1"]

        assert glide_sync_client.lpush(key, value_list) == 4
        assert glide_sync_client.ltrim(key, 0, 1) == OK
        assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
            ["value1", "value2"]
        )

        # start > end removes all elements (the key is deleted)
        assert glide_sync_client.ltrim(key, 4, 2) == OK
        assert glide_sync_client.lrange(key, 0, -1) == []

        assert glide_sync_client.ltrim("non_existing_key", 0, 1) == OK

        assert glide_sync_client.set(key, "foo") == OK
        with pytest.raises(RequestError) as e:
            glide_sync_client.ltrim(key, 0, 1)
        assert "Operation against a key holding the wrong kind of value" in str(e)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_lrem(self, glide_sync_client: GlideSync):
        """LREM: count > 0 removes from the head, count < 0 from the tail,
        count == 0 removes all occurrences; missing key removes nothing."""
        key = get_random_string(10)
        value_list: List[TEncodable] = [
            "value1",
            "value2",
            "value1",
            "value1",
            "value2",
        ]

        assert glide_sync_client.lpush(key, value_list) == 5

        # remove the first 2 occurrences of "value1" from the head
        assert glide_sync_client.lrem(key, 2, "value1") == 2
        assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
            ["value2", "value2", "value1"]
        )

        # negative count removes from the tail
        assert glide_sync_client.lrem(key, -1, "value2") == 1
        assert glide_sync_client.lrange(key, 0, -1) == convert_string_to_bytes_object(
            ["value2", "value1"]
        )

        # count 0 removes every occurrence
        assert glide_sync_client.lrem(key, 0, "value2") == 1
        assert glide_sync_client.lrange(key, 0, -1) == [b"value1"]

        assert glide_sync_client.lrem("non_existing_key", 2, "value") == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_llen(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ value_list: List[TEncodable] = ["value4", "value3", "value2", "value1"]
+
+ assert glide_sync_client.lpush(key1, value_list) == 4
+ assert glide_sync_client.llen(key1) == 4
+
+ assert glide_sync_client.llen("non_existing_key") == 0
+
+ assert glide_sync_client.set(key2, "foo") == OK
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.llen(key2)
+ assert "Operation against a key holding the wrong kind of value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_strlen(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ value_list: List[TEncodable] = ["value4", "value3", "value2", "value1"]
+
+ assert glide_sync_client.set(key1, "foo") == OK
+ assert glide_sync_client.strlen(key1) == 3
+ assert glide_sync_client.strlen("non_existing_key") == 0
+
+ assert glide_sync_client.lpush(key2, value_list) == 4
+ with pytest.raises(RequestError):
+ assert glide_sync_client.strlen(key2)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_rename(self, glide_sync_client: GlideSync):
        """RENAME: an existing key is renamed; renaming a missing source raises.
        Keys share a hash tag so the two-key command is valid in cluster mode."""
        key1 = "{" + get_random_string(10) + "}"
        assert glide_sync_client.set(key1, "foo") == OK
        assert glide_sync_client.rename(key1, key1 + "_rename") == OK
        assert glide_sync_client.exists([key1 + "_rename"]) == 1

        # RENAME requires the source key to exist.
        with pytest.raises(RequestError):
            assert glide_sync_client.rename(
                "{same_slot}" + "non_existing_key", "{same_slot}" + "_rename"
            )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_renamenx(self, glide_sync_client: GlideSync):
        """RENAMENX: rename only succeeds when the destination does not exist;
        a missing source raises. Keys share the {testKey} hash tag."""
        key1 = f"{{testKey}}:1-{get_random_string(10)}"
        key2 = f"{{testKey}}:2-{get_random_string(10)}"
        key3 = f"{{testKey}}:3-{get_random_string(10)}"
        non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"

        # Verify that attempting to rename a non-existing key throws an error
        with pytest.raises(RequestError):
            assert glide_sync_client.renamenx(non_existing_key, key1)

        # Test RENAMENX with string values
        assert glide_sync_client.set(key1, "key1") == OK
        assert glide_sync_client.set(key3, "key3") == OK
        # Test that RENAMENX can rename key1 to key2 (where key2 does not yet exist)
        assert glide_sync_client.renamenx(key1, key2) is True
        # Verify that key2 now holds the value that was in key1
        assert glide_sync_client.get(key2) == b"key1"
        # Verify that RENAMENX doesn't rename key2 to key3, since key3 already exists
        assert glide_sync_client.renamenx(key2, key3) is False
        # Verify that key3 remains unchanged
        assert glide_sync_client.get(key3) == b"key3"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_exists(self, glide_sync_client: GlideSync):
+ keys = [get_random_string(10), get_random_string(10)]
+
+ assert glide_sync_client.set(keys[0], "value") == OK
+ assert glide_sync_client.exists(keys) == 1
+
+ assert glide_sync_client.set(keys[1], "value") == OK
+ assert glide_sync_client.exists(keys) == 2
+ keys.append("non_existing_key")
+ assert glide_sync_client.exists(keys) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_unlink(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ key3 = get_random_string(10)
+
+ assert glide_sync_client.set(key1, "value") == OK
+ assert glide_sync_client.set(key2, "value") == OK
+ assert glide_sync_client.set(key3, "value") == OK
+ assert glide_sync_client.unlink([key1, key2, "non_existing_key", key3]) == 3
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_expire_pexpire_ttl_expiretime_pexpiretime_with_positive_timeout(
+ self, glide_sync_client: GlideSync
+ ):
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "foo") == OK
+ assert glide_sync_client.ttl(key) == -1
+
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert glide_sync_client.expiretime(key) == -1
+ assert glide_sync_client.pexpiretime(key) == -1
+
+ assert glide_sync_client.expire(key, 10) == 1
+ assert glide_sync_client.ttl(key) in range(11)
+
+ # set command clears the timeout.
+ assert glide_sync_client.set(key, "bar") == OK
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert glide_sync_client.pexpire(key, 10000)
+ else:
+ assert glide_sync_client.pexpire(key, 10000, ExpireOptions.HasNoExpiry)
+ assert glide_sync_client.ttl(key) in range(11)
+
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert glide_sync_client.expire(key, 15)
+ else:
+ assert glide_sync_client.expire(key, 15, ExpireOptions.HasExistingExpiry)
+ assert glide_sync_client.expiretime(key) > int(time.time())
+ assert glide_sync_client.pexpiretime(key) > (int(time.time()) * 1000)
+ assert glide_sync_client.ttl(key) in range(16)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_expireat_pexpireat_ttl_with_positive_timeout(
        self, glide_sync_client: GlideSync
    ):
        """EXPIREAT/PEXPIREAT with absolute future timestamps, using the 7.0+
        conditional options where the server supports them."""
        key = get_random_string(10)
        assert glide_sync_client.set(key, "foo") == OK
        current_time = int(time.time())

        assert glide_sync_client.expireat(key, current_time + 10) == 1
        assert glide_sync_client.ttl(key) in range(11)
        if check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expireat(key, current_time + 50) == 1
        else:
            assert (
                glide_sync_client.expireat(
                    key, current_time + 50, ExpireOptions.NewExpiryGreaterThanCurrent
                )
                == 1
            )
        assert glide_sync_client.ttl(key) in range(51)

        # set command clears the timeout.
        assert glide_sync_client.set(key, "bar") == OK
        current_time_ms = int(time.time() * 1000)
        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            # HasExistingExpiry must fail because SET just cleared the TTL.
            assert not glide_sync_client.pexpireat(
                key, current_time_ms + 50000, ExpireOptions.HasExistingExpiry
            )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_expire_pexpire_expireat_pexpireat_expiretime_pexpiretime_past_or_negative_timeout(
        self, glide_sync_client: GlideSync
    ):
        """Past or negative timeouts delete the key immediately: TTL and the
        7.0+ EXPIRETIME/PEXPIRETIME commands then report -2 (key gone)."""
        key = get_random_string(10)
        assert glide_sync_client.set(key, "foo") == OK
        assert glide_sync_client.ttl(key) == -1

        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expiretime(key) == -1
            assert glide_sync_client.pexpiretime(key) == -1

        # negative relative timeout (seconds)
        assert glide_sync_client.expire(key, -10) is True
        assert glide_sync_client.ttl(key) == -2
        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expiretime(key) == -2
            assert glide_sync_client.pexpiretime(key) == -2

        # negative relative timeout (milliseconds)
        assert glide_sync_client.set(key, "foo") == OK
        assert glide_sync_client.pexpire(key, -10000)
        assert glide_sync_client.ttl(key) == -2
        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expiretime(key) == -2
            assert glide_sync_client.pexpiretime(key) == -2

        # absolute timestamp in the past (seconds)
        assert glide_sync_client.set(key, "foo") == OK
        assert glide_sync_client.expireat(key, int(time.time()) - 50) == 1
        assert glide_sync_client.ttl(key) == -2
        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expiretime(key) == -2
            assert glide_sync_client.pexpiretime(key) == -2

        # absolute timestamp in the past (milliseconds)
        assert glide_sync_client.set(key, "foo") == OK
        assert glide_sync_client.pexpireat(key, int(time.time() * 1000) - 50000)
        assert glide_sync_client.ttl(key) == -2
        if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
            assert glide_sync_client.expiretime(key) == -2
            assert glide_sync_client.pexpiretime(key) == -2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_expire_pexpire_expireAt_pexpireAt_ttl_expiretime_pexpiretime_non_existing_key(
+ self, glide_sync_client: GlideSync
+ ):
+ key = get_random_string(10)
+
+ assert glide_sync_client.expire(key, 10) == 0
+ assert not glide_sync_client.pexpire(key, 10000)
+ assert glide_sync_client.expireat(key, int(time.time()) + 50) == 0
+ assert not glide_sync_client.pexpireat(key, int(time.time() * 1000) + 50000)
+ assert glide_sync_client.ttl(key) == -2
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert glide_sync_client.expiretime(key) == -2
+ assert glide_sync_client.pexpiretime(key) == -2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_pttl(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ assert glide_sync_client.pttl(key) == -2
+ current_time = int(time.time())
+
+ assert glide_sync_client.set(key, "value") == OK
+ assert glide_sync_client.pttl(key) == -1
+
+ assert glide_sync_client.expire(key, 10)
+ assert 0 < glide_sync_client.pttl(key) <= 10000
+
+ assert glide_sync_client.expireat(key, current_time + 20)
+ assert 0 < glide_sync_client.pttl(key) <= 20000
+
+ assert glide_sync_client.pexpireat(key, current_time * 1000 + 30000)
+ assert 0 < glide_sync_client.pttl(key) <= 30000
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_persist(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "value") == OK
+ assert not glide_sync_client.persist(key)
+
+ assert glide_sync_client.expire(key, 10)
+ assert glide_sync_client.persist(key)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geoadd(self, glide_sync_client: GlideSync):
        # GEOADD add/update semantics: NX never updates, XX never adds, and the
        # CH flag counts updated members as well as newly added ones.
        key, key2 = get_random_string(10), get_random_string(10)
        members_coordinates: Dict[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 2
        members_coordinates["Catania"].latitude = 39
        # NX: existing members are left untouched, so nothing is added or moved.
        assert (
            glide_sync_client.geoadd(
                key,
                members_coordinates,
                existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST,
            )
            == 0
        )
        # XX: only existing members may be updated; the return value counts
        # additions, hence 0 even though "Catania" was repositioned.
        assert (
            glide_sync_client.geoadd(
                key,
                members_coordinates,
                existing_options=ConditionalChange.ONLY_IF_EXISTS,
            )
            == 0
        )
        members_coordinates["Catania"].latitude = 40
        members_coordinates.update({"Tel-Aviv": GeospatialData(32.0853, 34.7818)})
        # CH: counts changes too — 1 update (Catania) + 1 addition (Tel-Aviv).
        assert (
            glide_sync_client.geoadd(
                key,
                members_coordinates,
                changed=True,
            )
            == 2
        )

        # GEOADD on a key holding a plain string must fail with RequestError.
        assert glide_sync_client.set(key2, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.geoadd(key2, members_coordinates)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_geoadd_invalid_args(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+
+ with pytest.raises(RequestError):
+ glide_sync_client.geoadd(key, {})
+
+ with pytest.raises(RequestError):
+ glide_sync_client.geoadd(key, {"Place": GeospatialData(-181, 0)})
+
+ with pytest.raises(RequestError):
+ glide_sync_client.geoadd(key, {"Place": GeospatialData(181, 0)})
+
+ with pytest.raises(RequestError):
+ glide_sync_client.geoadd(key, {"Place": GeospatialData(0, 86)})
+
+ with pytest.raises(RequestError):
+ glide_sync_client.geoadd(key, {"Place": GeospatialData(0, -86)})
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearch_by_box(self, glide_sync_client: GlideSync):
        # GEOSEARCH with a BYBOX shape across units (km, m, ft, mi), both sort
        # orders, COUNT limits, and the WITHCOORD/WITHDIST/WITHHASH flags.
        key = get_random_string(10)
        members = ["Catania", "Palermo", "edge2", "edge1"]
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            "edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        # Expected [member, [distance_km, geohash_int, [longitude, latitude]]]
        # entries, ordered by ascending distance from the origin (15, 37).
        result = [
            [
                "Catania",
                [56.4413, 3479447370796909, [15.087267458438873, 37.50266842333162]],
            ],
            [
                "Palermo",
                [190.4424, 3479099956230698, [13.361389338970184, 38.1155563954963]],
            ],
            [
                "edge2",
                [279.7403, 3481342659049484, [17.241510450839996, 38.78813451624225]],
            ],
            [
                "edge1",
                [279.7405, 3479273021651468, [12.75848776102066, 38.78813451624225]],
            ],
        ]
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # Test search by box, unit: kilometers, from a geospatial data
        assert glide_sync_client.geosearch(
            key,
            GeospatialData(15, 37),
            GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
            OrderBy.ASC,
        ) == convert_string_to_bytes_object(members)

        # Same box, descending order, with all three WITH* flags.
        assert glide_sync_client.geosearch(
            key,
            GeospatialData(15, 37),
            GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
            OrderBy.DESC,
            with_coord=True,
            with_dist=True,
            with_hash=True,
        ) == convert_string_to_bytes_object(result[::-1])

        # COUNT 1 ascending keeps only the closest member (Catania).
        assert glide_sync_client.geosearch(
            key,
            GeospatialData(15, 37),
            GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
            OrderBy.ASC,
            count=GeoSearchCount(1),
            with_dist=True,
            with_hash=True,
        ) == [[b"Catania", [56.4413, 3479447370796909]]]

        # Test search by box, unit: meters, from a member, with distance
        meters = 400 * 1000
        assert glide_sync_client.geosearch(
            key,
            "Catania",
            GeoSearchByBox(meters, meters, GeoUnit.METERS),
            OrderBy.DESC,
            with_dist=True,
        ) == convert_string_to_bytes_object(
            [["edge2", [236529.1799]], ["Palermo", [166274.1516]], ["Catania", [0.0]]]
        )

        # Test search by box, unit: feet, from a member, with limited count to 2, with hash
        feet = 400 * 3280.8399
        assert glide_sync_client.geosearch(
            key,
            "Palermo",
            GeoSearchByBox(feet, feet, GeoUnit.FEET),
            OrderBy.ASC,
            count=GeoSearchCount(2),
            with_hash=True,
        ) == [[b"Palermo", [3479099956230698]], [b"edge1", [3479273021651468]]]

        # Test search by box, unit: miles, from a geospatial data, with limited ANY count to 1.
        # With ANY the server may return any matching member, so only membership is checked.
        assert (
            glide_sync_client.geosearch(
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(250, 250, GeoUnit.MILES),
                OrderBy.ASC,
                count=GeoSearchCount(1, True),
            )
        )[0] in cast(list, convert_string_to_bytes_object(members))
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearch_by_radius(self, glide_sync_client: GlideSync):
        # GEOSEARCH with a BYRADIUS shape across units, both sort orders, COUNT
        # limits, and the WITH* flags.
        key = get_random_string(10)
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            "edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        # Expected [member, [distance_km, geohash_int, [longitude, latitude]]]
        # for the two members within 200 km of the origin (15, 37), ascending.
        result = [
            [
                "Catania",
                [56.4413, 3479447370796909, [15.087267458438873, 37.50266842333162]],
            ],
            [
                "Palermo",
                [190.4424, 3479099956230698, [13.361389338970184, 38.1155563954963]],
            ],
        ]
        members = ["Catania", "Palermo", "edge2", "edge1"]
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # Test search by radius, units: feet, from a member
        feet = 200 * 3280.8399
        assert glide_sync_client.geosearch(
            key,
            "Catania",
            GeoSearchByRadius(feet, GeoUnit.FEET),
            OrderBy.ASC,
        ) == convert_string_to_bytes_object(members[:2])

        # Test search by radius, units: meters, from a member
        meters = 200 * 1000
        assert glide_sync_client.geosearch(
            key,
            "Catania",
            GeoSearchByRadius(meters, GeoUnit.METERS),
            OrderBy.DESC,
        ) == convert_string_to_bytes_object(members[:2][::-1])

        # Test search by radius, unit: miles, from a geospatial data
        assert glide_sync_client.geosearch(
            key,
            GeospatialData(15, 37),
            GeoSearchByRadius(175, GeoUnit.MILES),
            OrderBy.DESC,
        ) == convert_string_to_bytes_object(members[::-1])

        # Test search by radius, unit: kilometers, from a geospatial data, with limited count to 2
        assert glide_sync_client.geosearch(
            key,
            GeospatialData(15, 37),
            GeoSearchByRadius(200, GeoUnit.KILOMETERS),
            OrderBy.ASC,
            count=GeoSearchCount(2),
            with_coord=True,
            with_dist=True,
            with_hash=True,
        ) == convert_string_to_bytes_object(result)

        # Test search by radius, unit: kilometers, from a geospatial data, with limited ANY count to 1.
        # With ANY the server may return any matching member, so only membership is checked.
        assert (
            glide_sync_client.geosearch(
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(200, GeoUnit.KILOMETERS),
                OrderBy.ASC,
                count=GeoSearchCount(1, True),
                with_coord=True,
                with_dist=True,
                with_hash=True,
            )
        )[0] in cast(list, convert_string_to_bytes_object(result))
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearch_no_result(self, glide_sync_client: GlideSync):
        # GEOSEARCH edge cases: empty result sets, self-only matches, a missing
        # origin member, and a key holding the wrong value type.
        key = get_random_string(10)
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            "edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # No members within the area
        assert (
            glide_sync_client.geosearch(
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(50, 50, GeoUnit.METERS),
                OrderBy.ASC,
            )
            == []
        )

        assert (
            glide_sync_client.geosearch(
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(10, GeoUnit.METERS),
                OrderBy.ASC,
            )
            == []
        )

        # No members in the area (apart from the member we search from itself)
        assert glide_sync_client.geosearch(
            key,
            "Catania",
            GeoSearchByBox(10, 10, GeoUnit.KILOMETERS),
        ) == [b"Catania"]

        assert glide_sync_client.geosearch(
            key,
            "Catania",
            GeoSearchByRadius(10, GeoUnit.METERS),
        ) == [b"Catania"]

        # Searching from a non-existing member raises an error
        with pytest.raises(RequestError):
            glide_sync_client.geosearch(
                key,
                "non_existing_member",
                GeoSearchByBox(10, 10, GeoUnit.MILES),
            )

        # Key now holds a plain string, so any GEOSEARCH must fail
        assert glide_sync_client.set(key, "foo") == OK
        with pytest.raises(RequestError):
            glide_sync_client.geosearch(
                key,
                "Catania",
                GeoSearchByBox(10, 10, GeoUnit.MILES),
            )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearchstore_by_box(self, glide_sync_client: GlideSync):
        # GEOSEARCHSTORE with BYBOX: results land in a destination sorted set,
        # scored by geohash integer by default or by distance with STOREDIST.
        # Both keys share a hash tag so the command is valid in cluster mode.
        key = f"{{testKey}}:{get_random_string(10)}"
        destination_key = f"{{testKey}}:{get_random_string(8)}"
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            "edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        # member -> [distance_km, geohash_score] from the origin (15, 37).
        result = {
            b"Catania": [56.4412578701582, 3479447370796909.0],
            b"Palermo": [190.44242984775784, 3479099956230698.0],
            b"edge2": [279.7403417843143, 3481342659049484.0],
            b"edge1": [279.7404521356343, 3479273021651468.0],
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # Test storing results of a box search, unit: kilometers, from a geospatial data
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
            )
        ) == 4  # Number of elements stored

        # Verify the stored results (scores are the geohash integers)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_map = {member: value[1] for member, value in result.items()}
        sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1]))
        zrange_map = round_values(zrange_map, 10)
        assert compare_maps(zrange_map, sorted_expected_map) is True

        # Test storing results of a box search, unit: kilometers, from a geospatial data, with distance
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
                store_dist=True,
            )
        ) == 4  # Number of elements stored

        # Verify the stored results (scores are now distances)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_map = {member: value[0] for member, value in result.items()}
        sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1]))
        zrange_map = round_values(zrange_map, 10)
        sorted_expected_map = round_values(sorted_expected_map, 10)
        assert compare_maps(zrange_map, sorted_expected_map) is True

        # Test storing results of a box search, unit: kilometers, from a geospatial data, with count
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
                count=GeoSearchCount(1),
            )
        ) == 1  # Number of elements stored

        # Verify the stored results (only the closest member, Catania, remains)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        assert compare_maps(zrange_map, {b"Catania": 3479447370796909.0}) is True

        # Test storing results of a box search, unit: meters, from a member, with distance
        meters = 400 * 1000
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByBox(meters, meters, GeoUnit.METERS),
                store_dist=True,
            )
        ) == 3  # Number of elements stored

        # Verify the stored results with distances
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_distances = {
            b"Catania": 0.0,
            b"Palermo": 166274.15156960033,
            b"edge2": 236529.17986494553,
        }
        zrange_map = round_values(zrange_map, 9)
        expected_distances = round_values(expected_distances, 9)
        assert compare_maps(zrange_map, expected_distances) is True

        # Test search by box, unit: feet, from a member, with limited count to 2
        feet = 400 * 3280.8399
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Palermo",
                GeoSearchByBox(feet, feet, GeoUnit.FEET),
                count=GeoSearchCount(2),
            )
            == 2
        )

        # Verify the stored results only contain known members
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        for member in zrange_map:
            assert member in result
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearchstore_by_radius(self, glide_sync_client: GlideSync):
        # GEOSEARCHSTORE with BYRADIUS across units, with STOREDIST, COUNT and
        # COUNT ... ANY. Keys share a hash tag for cluster-mode validity.
        key = f"{{testKey}}:{get_random_string(10)}"
        destination_key = f"{{testKey}}:{get_random_string(8)}"
        # Checking when parts of the value contain bytes
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            b"Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            b"edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        # member -> [distance_km, geohash_score] for the two members within
        # 200 km of the origin (15, 37).
        result = {
            b"Catania": [56.4412578701582, 3479447370796909.0],
            b"Palermo": [190.44242984775784, 3479099956230698.0],
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # Test storing results of a radius search, unit: feet, from a member
        feet = 200 * 3280.8399
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByRadius(feet, GeoUnit.FEET),
            )
            == 2
        )

        # Verify the stored results (scores are the geohash integers)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_map = {member: value[1] for member, value in result.items()}
        sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1]))
        assert compare_maps(zrange_map, sorted_expected_map) is True

        # Test search by radius, units: meters, from a member
        meters = 200 * 1000
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByRadius(meters, GeoUnit.METERS),
                store_dist=True,
            )
            == 2
        )

        # Verify the stored results (scores are now distances in meters)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_distances = {
            b"Catania": 0.0,
            b"Palermo": 166274.15156960033,
        }
        zrange_map = round_values(zrange_map, 9)
        expected_distances = round_values(expected_distances, 9)
        assert compare_maps(zrange_map, expected_distances) is True

        # Test search by radius, unit: miles, from a geospatial data
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(175, GeoUnit.MILES),
            )
            == 4
        )

        # Test storing results of a radius search, unit: kilometers, from a geospatial data, with limited count to 2
        kilometers = 200
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(kilometers, GeoUnit.KILOMETERS),
                count=GeoSearchCount(2),
                store_dist=True,
            )
            == 2
        )

        # Verify the stored results (distance scores for the two closest members)
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )
        expected_map = {member: value[0] for member, value in result.items()}
        sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1]))
        zrange_map = round_values(zrange_map, 10)
        sorted_expected_map = round_values(sorted_expected_map, 10)
        assert compare_maps(zrange_map, sorted_expected_map) is True

        # Test storing results of a radius search, unit: kilometers, from a geospatial data, with limited ANY count to 1
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(kilometers, GeoUnit.KILOMETERS),
                count=GeoSearchCount(1, True),
            )
            == 1
        )

        # Verify the stored results; with ANY the server may pick either
        # matching member, so only membership is checked.
        zrange_map = glide_sync_client.zrange_withscores(
            destination_key, RangeByIndex(0, -1)
        )

        for member in zrange_map:
            assert member in result
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geosearchstore_no_result(self, glide_sync_client: GlideSync):
        # GEOSEARCHSTORE edge cases: empty matches store 0 elements, self-only
        # matches store 1, and bad origins / wrong key types raise errors.
        key = f"{{testKey}}:{get_random_string(10)}"
        destination_key = f"{{testKey}}:{get_random_string(8)}"
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
            "edge1": GeospatialData(12.758489, 38.788135),
            "edge2": GeospatialData(17.241510, 38.788135),
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 4

        # No members within the area
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByBox(50, 50, GeoUnit.METERS),
            )
            == 0
        )

        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                GeospatialData(15, 37),
                GeoSearchByRadius(10, GeoUnit.METERS),
            )
            == 0
        )

        # No members in the area (apart from the member we search from itself)
        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByBox(10, 10, GeoUnit.KILOMETERS),
            )
            == 1
        )

        assert (
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByRadius(10, GeoUnit.METERS),
            )
            == 1
        )

        # Search from non-existing member
        with pytest.raises(RequestError):
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "non_existing_member",
                GeoSearchByBox(10, 10, GeoUnit.MILES),
            )

        # Source key now holds a plain string, so the command must fail
        assert glide_sync_client.set(key, "foo") == OK
        with pytest.raises(RequestError):
            glide_sync_client.geosearchstore(
                destination_key,
                key,
                "Catania",
                GeoSearchByBox(10, 10, GeoUnit.MILES),
            )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_geohash(self, glide_sync_client: GlideSync):
        # GEOHASH returns an 11-character geohash string per requested member,
        # with None for members absent from the sorted set.
        key = get_random_string(10)
        members_coordinates: Mapping[TEncodable, GeospatialData] = {
            "Palermo": GeospatialData(13.361389, 38.115556),
            "Catania": GeospatialData(15.087269, 37.502669),
        }
        assert glide_sync_client.geoadd(key, members_coordinates) == 2
        assert glide_sync_client.geohash(
            key, ["Palermo", "Catania", "Place"]
        ) == convert_string_to_bytes_object(
            [
                "sqc8b49rny0",
                "sqdtr74hyu0",
                None,
            ]
        )

        # A missing key yields None for every requested member.
        assert (
            glide_sync_client.geohash(
                "non_existing_key", ["Palermo", "Catania", "Place"]
            )
            == [None] * 3
        )

        # Necessary to check since we are enforcing the user to pass a list of members
        # while valkey doesn't. When running the command with key only (and no members)
        # the returned value will always be an empty list, so in case of any changes,
        # this test will fail and inform us that we should allow not passing any members.
        assert glide_sync_client.geohash(key, []) == []

        # A key holding a plain string must raise RequestError.
        assert glide_sync_client.set(key, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.geohash(key, ["Palermo", "Catania"])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_geodist(self, glide_sync_client: GlideSync):
+ key, key2 = get_random_string(10), get_random_string(10)
+ members_coordinates: Mapping[TEncodable, GeospatialData] = {
+ "Palermo": GeospatialData(13.361389, 38.115556),
+ "Catania": GeospatialData(15.087269, 37.502669),
+ }
+ assert glide_sync_client.geoadd(key, members_coordinates) == 2
+
+ assert glide_sync_client.geodist(key, "Palermo", "Catania") == 166274.1516
+ assert (
+ glide_sync_client.geodist(key, "Palermo", "Catania", GeoUnit.KILOMETERS)
+ == 166.2742
+ )
+ assert glide_sync_client.geodist(key, "Palermo", "Palermo", GeoUnit.MILES) == 0
+ assert (
+ glide_sync_client.geodist(
+ key, "Palermo", "non-existing-member", GeoUnit.FEET
+ )
+ == None
+ )
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.geodist(key2, "Palmero", "Catania")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_geopos(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_coordinates: Mapping[TEncodable, GeospatialData] = {
+ "Palermo": GeospatialData(13.361389, 38.115556),
+ "Catania": GeospatialData(15.087269, 37.502669),
+ }
+ assert glide_sync_client.geoadd(key, members_coordinates) == 2
+
+ # The comparison allows for a small tolerance level due to potential precision errors in floating-point calculations
+ # No worries, Python can handle it, therefore, this shouldn't fail
+ positions = glide_sync_client.geopos(key, ["Palermo", "Catania", "Place"])
+ expected_positions = [
+ [13.36138933897018433, 38.11555639549629859],
+ [15.08726745843887329, 37.50266842333162032],
+ ]
+ assert len(positions) == 3 and positions[2] is None
+
+ assert all(
+ all(
+ math.isclose(actual_coord, expected_coord)
+ for actual_coord, expected_coord in zip(actual_pos, expected_pos)
+ )
+ for actual_pos, expected_pos in zip(positions, expected_positions)
+ if actual_pos is not None
+ )
+
+ assert (
+ glide_sync_client.geopos(
+ "non_existing_key", ["Palermo", "Catania", "Place"]
+ )
+ == [None] * 3
+ )
+
+ # Neccessary to check since we are enforcing the user to pass a list of members while valkey don't
+ # But when running the command with key only (and no members) the returned value will always be an empty list
+ # So in case of any changes, this test will fail and inform us that we should allow not passing any members.
+ assert glide_sync_client.geohash(key, []) == []
+
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.geopos(key, ["Palermo", "Catania"])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zadd_zaddincr(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+ assert glide_sync_client.zadd_incr(key, member="one", increment=2) == 3.0
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_zadd_nx_xx(self, glide_sync_client: GlideSync):
        # ZADD with ONLY_IF_EXISTS (XX) / ONLY_IF_DOES_NOT_EXIST (NX), for both
        # plain adds and the INCR variant.
        key = get_random_string(10)
        members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
        # XX on an empty key: no members exist yet, so nothing is added.
        assert (
            glide_sync_client.zadd(
                key,
                members_scores=members_scores,
                existing_options=ConditionalChange.ONLY_IF_EXISTS,
            )
            == 0
        )
        # NX: all three members are new, so all are added.
        assert (
            glide_sync_client.zadd(
                key,
                members_scores=members_scores,
                existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST,
            )
            == 3
        )

        # INCR + NX on an existing member is rejected and yields None.
        assert (
            glide_sync_client.zadd_incr(
                key,
                member="one",
                increment=5.0,
                existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST,
            )
            is None
        )

        # INCR + XX on an existing member applies the increment (1 + 5 = 6).
        assert (
            glide_sync_client.zadd_incr(
                key,
                member="one",
                increment=5.0,
                existing_options=ConditionalChange.ONLY_IF_EXISTS,
            )
            == 6.0
        )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zadd_gt_lt(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Dict[TEncodable, float] = {"one": -3, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+ members_scores["one"] = 10
+ assert (
+ glide_sync_client.zadd(
+ key,
+ members_scores=members_scores,
+ update_condition=UpdateOptions.GREATER_THAN,
+ changed=True,
+ )
+ == 1
+ )
+
+ assert (
+ glide_sync_client.zadd(
+ key,
+ members_scores=members_scores,
+ update_condition=UpdateOptions.LESS_THAN,
+ changed=True,
+ )
+ == 0
+ )
+
+ assert (
+ glide_sync_client.zadd_incr(
+ key,
+ member="one",
+ increment=-3.0,
+ update_condition=UpdateOptions.LESS_THAN,
+ )
+ == 7.0
+ )
+
+ assert (
+ glide_sync_client.zadd_incr(
+ key,
+ member="one",
+ increment=-3.0,
+ update_condition=UpdateOptions.GREATER_THAN,
+ )
+ is None
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zincrby(self, glide_sync_client: GlideSync):
+ key, member, member2 = (
+ get_random_string(10),
+ get_random_string(5),
+ get_random_string(5),
+ )
+
+ # key does not exist
+ assert glide_sync_client.zincrby(key, 2.5, member) == 2.5
+ assert glide_sync_client.zscore(key, member) == 2.5
+
+ # key exists, but value doesn't
+ assert glide_sync_client.zincrby(key, -3.3, member2) == -3.3
+ assert glide_sync_client.zscore(key, member2) == -3.3
+
+ # updating existing value in existing key
+ assert glide_sync_client.zincrby(key, 1.0, member) == 3.5
+ assert glide_sync_client.zscore(key, member) == 3.5
+
+ # Key exists, but it is not a sorted set
+ assert glide_sync_client.set(key, "_") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zincrby(key, 0.5, "_")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrem(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+
+ assert glide_sync_client.zrem(key, ["one"]) == 1
+ assert glide_sync_client.zrem(key, ["one", "two", "three"]) == 2
+
+ assert glide_sync_client.zrem("non_existing_set", ["member"]) == 0
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_zremrangebyscore(self, glide_sync_client: GlideSync):
        # ZREMRANGEBYSCORE removes members within a score range; inverted or
        # non-matching ranges and missing keys remove nothing.
        key = get_random_string(10)
        members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
        assert glide_sync_client.zadd(key, members_scores) == 3

        # (1, 2]: "one" is excluded by the exclusive bound, only "two" is removed.
        assert (
            glide_sync_client.zremrangebyscore(
                key, ScoreBoundary(1, False), ScoreBoundary(2)
            )
            == 1
        )
        # min > max: empty range, nothing removed.
        assert (
            glide_sync_client.zremrangebyscore(key, ScoreBoundary(1), InfBound.NEG_INF)
            == 0
        )
        # Missing key: nothing removed.
        assert (
            glide_sync_client.zremrangebyscore(
                "non_existing_set", InfBound.NEG_INF, InfBound.POS_INF
            )
            == 0
        )

        # A key holding a plain string must raise RequestError.
        assert glide_sync_client.set(key, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.zremrangebyscore(key, InfBound.NEG_INF, InfBound.POS_INF)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_zremrangebylex(self, glide_sync_client: GlideSync):
        # ZREMRANGEBYLEX removes members within a lexicographic range; inverted
        # ranges and missing keys remove nothing.
        key1 = get_random_string(10)
        key2 = get_random_string(10)
        range = RangeByIndex(0, -1)
        members_scores: Mapping[TEncodable, float] = {"a": 1, "b": 2, "c": 3, "d": 4}
        assert glide_sync_client.zadd(key1, members_scores) == 4

        # ("a", "c"]: "a" is excluded by the exclusive bound — removes "b", "c".
        assert (
            glide_sync_client.zremrangebylex(
                key1, LexBoundary("a", False), LexBoundary("c")
            )
            == 2
        )
        zremrangebylex_res = glide_sync_client.zrange_withscores(key1, range)
        assert compare_maps(zremrangebylex_res, {"a": 1.0, "d": 4.0}) is True

        # ["d", +inf): removes the remaining "d".
        assert (
            glide_sync_client.zremrangebylex(key1, LexBoundary("d"), InfBound.POS_INF)
            == 1
        )
        assert glide_sync_client.zrange_withscores(key1, range) == {b"a": 1.0}

        # min_lex > max_lex
        assert (
            glide_sync_client.zremrangebylex(key1, LexBoundary("a"), InfBound.NEG_INF)
            == 0
        )
        assert glide_sync_client.zrange_withscores(key1, range) == {b"a": 1.0}

        # Missing key: nothing removed.
        assert (
            glide_sync_client.zremrangebylex(
                "non_existing_key", InfBound.NEG_INF, InfBound.POS_INF
            )
            == 0
        )

        # key exists, but it is not a sorted set
        assert glide_sync_client.set(key2, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.zremrangebylex(
                key2, LexBoundary("a", False), LexBoundary("c")
            )
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_zremrangebyrank(self, glide_sync_client: GlideSync):
        # ZREMRANGEBYRANK removes members by rank (index) range; an inverted
        # range removes nothing and an over-long range is clamped.
        key1 = get_random_string(10)
        key2 = get_random_string(10)
        range = RangeByIndex(0, -1)
        members_scores: Mapping[TEncodable, float] = {
            "a": 1,
            "b": 2,
            "c": 3,
            "d": 4,
            "e": 5,
        }
        assert glide_sync_client.zadd(key1, members_scores) == 5

        # Test start exceeding end
        assert glide_sync_client.zremrangebyrank(key1, 2, 1) == 0

        # Test removing elements by rank
        assert glide_sync_client.zremrangebyrank(key1, 0, 2) == 3
        zremrangebyrank_res = glide_sync_client.zrange_withscores(key1, range)
        assert compare_maps(zremrangebyrank_res, {"d": 4.0, "e": 5.0}) is True

        # Test removing elements beyond the existing range
        assert glide_sync_client.zremrangebyrank(key1, 0, 10) == 2
        assert glide_sync_client.zrange_withscores(key1, range) == {}

        # Test with non-existing key
        assert glide_sync_client.zremrangebyrank("non_existing_key", 0, 1) == 0

        # Key exists, but it is not a sorted set
        assert glide_sync_client.set(key2, "value") == OK
        with pytest.raises(RequestError):
            glide_sync_client.zremrangebyrank(key2, 0, 1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zlexcount(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"a": 1.0, "b": 2.0, "c": 3.0}
+
+ assert glide_sync_client.zadd(key1, members_scores) == 3
+ assert (
+ glide_sync_client.zlexcount(key1, InfBound.NEG_INF, InfBound.POS_INF) == 3
+ )
+ assert (
+ glide_sync_client.zlexcount(
+ key1,
+ LexBoundary("a", is_inclusive=False),
+ LexBoundary("c", is_inclusive=True),
+ )
+ == 2
+ )
+ assert (
+ glide_sync_client.zlexcount(
+ key1, InfBound.NEG_INF, LexBoundary("c", is_inclusive=True)
+ )
+ == 3
+ )
+ # Incorrect range; start > end
+ assert (
+ glide_sync_client.zlexcount(
+ key1, InfBound.POS_INF, LexBoundary("c", is_inclusive=True)
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zlexcount(
+ "non_existing_key", InfBound.NEG_INF, InfBound.POS_INF
+ )
+ == 0
+ )
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zlexcount(key2, InfBound.NEG_INF, InfBound.POS_INF)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zcard(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+ assert glide_sync_client.zcard(key) == 3
+
+ assert glide_sync_client.zrem(key, ["one"]) == 1
+ assert glide_sync_client.zcard(key) == 2
+ assert glide_sync_client.zcard("non_existing_key") == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zcount(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+
+ assert glide_sync_client.zcount(key, InfBound.NEG_INF, InfBound.POS_INF) == 3
+ assert (
+ glide_sync_client.zcount(
+ key,
+ ScoreBoundary(1, is_inclusive=False),
+ ScoreBoundary(3, is_inclusive=False),
+ )
+ == 1
+ )
+ assert (
+ glide_sync_client.zcount(
+ key,
+ ScoreBoundary(1, is_inclusive=False),
+ ScoreBoundary(3, is_inclusive=True),
+ )
+ == 2
+ )
+ assert (
+ glide_sync_client.zcount(
+ key, InfBound.NEG_INF, ScoreBoundary(3, is_inclusive=True)
+ )
+ == 3
+ )
+ assert (
+ glide_sync_client.zcount(
+ key, InfBound.POS_INF, ScoreBoundary(3, is_inclusive=True)
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zcount(
+ "non_existing_key", InfBound.NEG_INF, InfBound.POS_INF
+ )
+ == 0
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zscore(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+ assert glide_sync_client.zscore(key, "one") == 1.0
+
+ assert glide_sync_client.zscore(key, "non_existing_member") is None
+ assert (
+ glide_sync_client.zscore("non_existing_key", "non_existing_member") is None
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zmscore(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+
+ assert glide_sync_client.zadd(key1, members_scores=members_scores) == 3
+ assert glide_sync_client.zmscore(key1, ["one", "two", "three"]) == [
+ 1.0,
+ 2.0,
+ 3.0,
+ ]
+ assert glide_sync_client.zmscore(
+ key1, ["one", "non_existing_member", "non_existing_member", "three"]
+ ) == [1.0, None, None, 3.0]
+ assert glide_sync_client.zmscore("non_existing_key", ["one"]) == [None]
+
+ assert glide_sync_client.set(key2, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zmscore(key2, ["one"])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zinter_commands(self, glide_sync_client: GlideSync):
+ key1 = "{testKey}:1-" + get_random_string(10)
+ key2 = "{testKey}:2-" + get_random_string(10)
+ key3 = "{testKey}:3-" + get_random_string(10)
+ range = RangeByIndex(0, -1)
+ members_scores1: Mapping[TEncodable, float] = {"one": 1.0, "two": 2.0}
+ members_scores2: Mapping[TEncodable, float] = {
+ "one": 1.5,
+ "two": 2.5,
+ "three": 3.5,
+ }
+
+ assert glide_sync_client.zadd(key1, members_scores1) == 2
+ assert glide_sync_client.zadd(key2, members_scores2) == 3
+
+ # zinter tests
+ zinter_map = glide_sync_client.zinter([key1, key2])
+ expected_zinter_map = [b"one", b"two"]
+ assert zinter_map == expected_zinter_map
+
+ # zinterstore tests
+ assert glide_sync_client.zinterstore(key3, [key1, key2]) == 2
+ zinterstore_map = glide_sync_client.zrange_withscores(key3, range)
+ expected_zinter_map_withscores = {
+ b"one": 2.5,
+ b"two": 4.5,
+ }
+ assert compare_maps(zinterstore_map, expected_zinter_map_withscores) is True
+
+ # zinter_withscores tests
+ zinter_withscores_map = glide_sync_client.zinter_withscores([key1, key2])
+ assert (
+ compare_maps(zinter_withscores_map, expected_zinter_map_withscores) is True
+ )
+
+ # MAX aggregation tests
+ assert (
+ glide_sync_client.zinterstore(key3, [key1, key2], AggregationType.MAX) == 2
+ )
+ zinterstore_map_max = glide_sync_client.zrange_withscores(key3, range)
+ expected_zinter_map_max = {
+ b"one": 1.5,
+ b"two": 2.5,
+ }
+ assert compare_maps(zinterstore_map_max, expected_zinter_map_max) is True
+
+ zinter_withscores_map_max = glide_sync_client.zinter_withscores(
+ [key1, key2], AggregationType.MAX
+ )
+ assert compare_maps(zinter_withscores_map_max, expected_zinter_map_max) is True
+
+ # MIN aggregation tests
+ assert (
+ glide_sync_client.zinterstore(key3, [key1, key2], AggregationType.MIN) == 2
+ )
+ zinterstore_map_min = glide_sync_client.zrange_withscores(key3, range)
+ expected_zinter_map_min = {
+ b"one": 1.0,
+ b"two": 2.0,
+ }
+ assert compare_maps(zinterstore_map_min, expected_zinter_map_min) is True
+
+ zinter_withscores_map_min = glide_sync_client.zinter_withscores(
+ [key1, key2], AggregationType.MIN
+ )
+ assert compare_maps(zinter_withscores_map_min, expected_zinter_map_min) is True
+
+ # SUM aggregation tests
+ assert (
+ glide_sync_client.zinterstore(key3, [key1, key2], AggregationType.SUM) == 2
+ )
+ zinterstore_map_sum = glide_sync_client.zrange_withscores(key3, range)
+ assert compare_maps(zinterstore_map_sum, expected_zinter_map_withscores) is True
+
+ zinter_withscores_map_sum = glide_sync_client.zinter_withscores(
+ [key1, key2], AggregationType.SUM
+ )
+ assert (
+ compare_maps(zinter_withscores_map_sum, expected_zinter_map_withscores)
+ is True
+ )
+
+ # Multiplying scores during aggregation tests
+ assert (
+ glide_sync_client.zinterstore(
+ key3, [(key1, 2.0), (key2, 2.0)], AggregationType.SUM
+ )
+ == 2
+ )
+ zinterstore_map_multiplied = glide_sync_client.zrange_withscores(key3, range)
+ expected_zinter_map_multiplied = {
+ b"one": 5.0,
+ b"two": 9.0,
+ }
+ assert (
+ compare_maps(zinterstore_map_multiplied, expected_zinter_map_multiplied)
+ is True
+ )
+
+ zinter_withscores_map_multiplied = glide_sync_client.zinter_withscores(
+ [(key1, 2.0), (key2, 2.0)], AggregationType.SUM
+ )
+ assert (
+ compare_maps(
+ zinter_withscores_map_multiplied, expected_zinter_map_multiplied
+ )
+ is True
+ )
+
+ # Non-existing key test
+ assert (
+ glide_sync_client.zinterstore(key3, [key1, "{testKey}-non_existing_key"])
+ == 0
+ )
+ zinter_withscores_non_existing = glide_sync_client.zinter_withscores(
+ [key1, "{testKey}-non_existing_key"]
+ )
+ assert zinter_withscores_non_existing == {}
+
+ # Empty list check
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zinterstore(
+ "{xyz}", cast(List[TEncodable], cast(List[TEncodable], []))
+ )
+ assert "wrong number of arguments" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zinter([])
+ assert "wrong number of arguments" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zinter_withscores(cast(List[TEncodable], []))
+ assert "at least 1 input key is needed" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zunion_commands(self, glide_sync_client: GlideSync):
+ key1 = "{testKey}:1-" + get_random_string(10)
+ key2 = "{testKey}:2-" + get_random_string(10)
+ key3 = "{testKey}:3-" + get_random_string(10)
+ range = RangeByIndex(0, -1)
+ members_scores1: Mapping[TEncodable, float] = {"one": 1.0, "two": 2.0}
+ members_scores2: Mapping[TEncodable, float] = {
+ "one": 1.5,
+ "two": 2.5,
+ "three": 3.5,
+ }
+
+ assert glide_sync_client.zadd(key1, members_scores1) == 2
+ assert glide_sync_client.zadd(key2, members_scores2) == 3
+
+ # zunion tests
+ zunion_map = glide_sync_client.zunion([key1, key2])
+ expected_zunion_map = [b"one", b"three", b"two"]
+ assert zunion_map == expected_zunion_map
+
+ # zunionstore tests
+ assert glide_sync_client.zunionstore(key3, [key1, key2]) == 3
+ zunionstore_map = glide_sync_client.zrange_withscores(key3, range)
+ expected_zunion_map_withscores = {
+ b"one": 2.5,
+ b"three": 3.5,
+ b"two": 4.5,
+ }
+ assert compare_maps(zunionstore_map, expected_zunion_map_withscores) is True
+
+ # zunion_withscores tests
+ zunion_withscores_map = glide_sync_client.zunion_withscores([key1, key2])
+ assert (
+ compare_maps(zunion_withscores_map, expected_zunion_map_withscores) is True
+ )
+
+ # MAX aggregation tests
+ assert (
+ glide_sync_client.zunionstore(key3, [key1, key2], AggregationType.MAX) == 3
+ )
+ zunionstore_map_max = glide_sync_client.zrange_withscores(key3, range)
+ expected_zunion_map_max = {
+ b"one": 1.5,
+ b"two": 2.5,
+ b"three": 3.5,
+ }
+ assert compare_maps(zunionstore_map_max, expected_zunion_map_max) is True
+
+ zunion_withscores_map_max = glide_sync_client.zunion_withscores(
+ [key1, key2], AggregationType.MAX
+ )
+ assert compare_maps(zunion_withscores_map_max, expected_zunion_map_max) is True
+
+ # MIN aggregation tests
+ assert (
+ glide_sync_client.zunionstore(key3, [key1, key2], AggregationType.MIN) == 3
+ )
+ zunionstore_map_min = glide_sync_client.zrange_withscores(key3, range)
+ expected_zunion_map_min = {
+ b"one": 1.0,
+ b"two": 2.0,
+ b"three": 3.5,
+ }
+ assert compare_maps(zunionstore_map_min, expected_zunion_map_min) is True
+
+ zunion_withscores_map_min = glide_sync_client.zunion_withscores(
+ [key1, key2], AggregationType.MIN
+ )
+ assert compare_maps(zunion_withscores_map_min, expected_zunion_map_min) is True
+
+ # SUM aggregation tests
+ assert (
+ glide_sync_client.zunionstore(key3, [key1, key2], AggregationType.SUM) == 3
+ )
+ zunionstore_map_sum = glide_sync_client.zrange_withscores(key3, range)
+ assert compare_maps(zunionstore_map_sum, expected_zunion_map_withscores) is True
+
+ zunion_withscores_map_sum = glide_sync_client.zunion_withscores(
+ [key1, key2], AggregationType.SUM
+ )
+ assert (
+ compare_maps(zunion_withscores_map_sum, expected_zunion_map_withscores)
+ is True
+ )
+
+ # Multiplying scores during aggregation tests
+ assert (
+ glide_sync_client.zunionstore(
+ key3, [(key1, 2.0), (key2, 2.0)], AggregationType.SUM
+ )
+ == 3
+ )
+ zunionstore_map_multiplied = glide_sync_client.zrange_withscores(key3, range)
+ expected_zunion_map_multiplied = {
+ b"one": 5.0,
+ b"three": 7.0,
+ b"two": 9.0,
+ }
+ assert (
+ compare_maps(zunionstore_map_multiplied, expected_zunion_map_multiplied)
+ is True
+ )
+
+ zunion_withscores_map_multiplied = glide_sync_client.zunion_withscores(
+ [(key1, 2.0), (key2, 2.0)], AggregationType.SUM
+ )
+ assert (
+ compare_maps(
+ zunion_withscores_map_multiplied, expected_zunion_map_multiplied
+ )
+ is True
+ )
+
+ # Non-existing key test
+ assert (
+ glide_sync_client.zunionstore(key3, [key1, "{testKey}-non_existing_key"])
+ == 2
+ )
+ zunionstore_map_nonexistingkey = glide_sync_client.zrange_withscores(
+ key3, range
+ )
+ expected_zunion_map_nonexistingkey = {
+ b"one": 1.0,
+ b"two": 2.0,
+ }
+ assert (
+ compare_maps(
+ zunionstore_map_nonexistingkey, expected_zunion_map_nonexistingkey
+ )
+ is True
+ )
+
+ zunion_withscores_non_existing = glide_sync_client.zunion_withscores(
+ [key1, "{testKey}-non_existing_key"]
+ )
+ assert (
+ compare_maps(
+ zunion_withscores_non_existing, expected_zunion_map_nonexistingkey
+ )
+ is True
+ )
+
+ # Empty list check
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zunionstore("{xyz}", cast(List[TEncodable], []))
+ assert "wrong number of arguments" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zunion([])
+ assert "wrong number of arguments" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.zunion_withscores(cast(List[TEncodable], []))
+ assert "at least 1 input key is needed" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zpopmin(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"a": 1.0, "b": 2.0, "c": 3.0}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+ assert glide_sync_client.zpopmin(key) == {b"a": 1.0}
+
+ zpopmin_map = glide_sync_client.zpopmin(key, 3)
+ expected_map = {b"b": 2.0, b"c": 3.0}
+ assert compare_maps(zpopmin_map, expected_map) is True
+
+ assert glide_sync_client.zpopmin(key) == {}
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zpopmin(key)
+
+ assert glide_sync_client.zpopmin("non_exisitng_key") == {}
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bzpopmin(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:{get_random_string(10)}"
+ key2 = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:non_existing_key"
+
+ assert glide_sync_client.zadd(key1, {"a": 1.0, "b": 1.5}) == 2
+ assert glide_sync_client.zadd(key2, {"c": 2.0}) == 1
+ assert glide_sync_client.bzpopmin(
+ [key1, key2], 0.5
+ ) == convert_string_to_bytes_object([key1, "a", 1.0])
+ assert glide_sync_client.bzpopmin(
+ [non_existing_key, key2], 0.5
+ ) == convert_string_to_bytes_object(
+ [
+ key2,
+ "c",
+ 2.0,
+ ]
+ )
+ assert glide_sync_client.bzpopmin(["non_existing_key"], 0.5) is None
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.bzpopmin([], 0.5)
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set("foo", "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.bzpopmin(["foo"], 0.5)
+
+ def endless_bzpopmin_call():
+ glide_sync_client.bzpopmin(["non_existent_key"], 0)
+
+ # bzpopmin is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_bzpopmin_call(), timeout=0.5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zpopmax(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"a": 1.0, "b": 2.0, "c": 3.0}
+ assert glide_sync_client.zadd(key, members_scores) == 3
+ assert glide_sync_client.zpopmax(key) == {b"c": 3.0}
+
+ zpopmax_map = glide_sync_client.zpopmax(key, 3)
+ expected_map = {"b": 2.0, "a": 1.0}
+ assert compare_maps(zpopmax_map, expected_map) is True
+
+ assert glide_sync_client.zpopmax(key) == {}
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zpopmax(key)
+
+ assert glide_sync_client.zpopmax("non_exisitng_key") == {}
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bzpopmax(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:{get_random_string(10)}"
+ key2 = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:non_existing_key"
+
+ assert glide_sync_client.zadd(key1, {"a": 1.0, "b": 1.5}) == 2
+ assert glide_sync_client.zadd(key2, {"c": 2.0}) == 1
+ assert glide_sync_client.bzpopmax(
+ [key1, key2], 0.5
+ ) == convert_string_to_bytes_object([key1, "b", 1.5])
+ assert glide_sync_client.bzpopmax(
+ [non_existing_key, key2], 0.5
+ ) == convert_string_to_bytes_object(
+ [
+ key2,
+ "c",
+ 2.0,
+ ]
+ )
+ assert glide_sync_client.bzpopmax(["non_existing_key"], 0.5) is None
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.bzpopmax([], 0.5)
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set("foo", "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.bzpopmax(["foo"], 0.5)
+
+ def endless_bzpopmax_call():
+ glide_sync_client.bzpopmax(["non_existent_key"], 0)
+
+ # bzpopmax is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_bzpopmax_call(), timeout=0.5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrange_by_index(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+
+ assert glide_sync_client.zrange(key, RangeByIndex(0, 1)) == [
+ b"one",
+ b"two",
+ ]
+
+ zrange_map = glide_sync_client.zrange_withscores(key, RangeByIndex(0, -1))
+ expected_map = {b"one": 1.0, b"two": 2.0, b"three": 3.0}
+ assert compare_maps(zrange_map, expected_map) is True
+
+ assert glide_sync_client.zrange(key, RangeByIndex(0, 1), reverse=True) == [
+ b"three",
+ b"two",
+ ]
+
+ assert glide_sync_client.zrange(key, RangeByIndex(3, 1)) == []
+ assert glide_sync_client.zrange_withscores(key, RangeByIndex(3, 1)) == {}
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrange_byscore(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+
+ assert glide_sync_client.zrange(
+ key,
+ RangeByScore(InfBound.NEG_INF, ScoreBoundary(3, is_inclusive=False)),
+ ) == [b"one", b"two"]
+
+ zrange_map = glide_sync_client.zrange_withscores(
+ key,
+ RangeByScore(InfBound.NEG_INF, InfBound.POS_INF),
+ )
+ expected_map = {b"one": 1.0, b"two": 2.0, b"three": 3.0}
+ assert compare_maps(zrange_map, expected_map) is True
+
+ assert glide_sync_client.zrange(
+ key,
+ RangeByScore(ScoreBoundary(3, is_inclusive=False), InfBound.NEG_INF),
+ reverse=True,
+ ) == [b"two", b"one"]
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByScore(
+ InfBound.NEG_INF,
+ InfBound.POS_INF,
+ Limit(offset=1, count=2),
+ ),
+ )
+ ) == [b"two", b"three"]
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByScore(InfBound.NEG_INF, ScoreBoundary(3, is_inclusive=False)),
+ reverse=True,
+ )
+ == []
+ ) # end is greater than start with reverse set to True
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByScore(InfBound.POS_INF, ScoreBoundary(3, is_inclusive=False)),
+ )
+ == []
+ ) # start is greater than end
+
+ assert (
+ glide_sync_client.zrange_withscores(
+ key,
+ RangeByScore(InfBound.POS_INF, ScoreBoundary(3, is_inclusive=False)),
+ )
+ == {}
+ ) # start is greater than end
+
+ assert (
+ glide_sync_client.zrange_withscores(
+ key,
+ RangeByScore(InfBound.NEG_INF, ScoreBoundary(3, is_inclusive=False)),
+ reverse=True,
+ )
+ == {}
+ ) # end is greater than start with reverse set to True
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrange_bylex(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"a": 1, "b": 2, "c": 3}
+ assert glide_sync_client.zadd(key, members_scores=members_scores) == 3
+
+ assert glide_sync_client.zrange(
+ key,
+ RangeByLex(
+ start=InfBound.NEG_INF, end=LexBoundary("c", is_inclusive=False)
+ ),
+ ) == [b"a", b"b"]
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByLex(
+ start=InfBound.NEG_INF,
+ end=InfBound.POS_INF,
+ limit=Limit(offset=1, count=2),
+ ),
+ )
+ ) == [b"b", b"c"]
+
+ assert glide_sync_client.zrange(
+ key,
+ RangeByLex(
+ start=LexBoundary("c", is_inclusive=False), end=InfBound.NEG_INF
+ ),
+ reverse=True,
+ ) == [b"b", b"a"]
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByLex(
+ start=InfBound.NEG_INF, end=LexBoundary("c", is_inclusive=False)
+ ),
+ reverse=True,
+ )
+ == []
+ ) # end is greater than start with reverse set to True
+
+ assert (
+ glide_sync_client.zrange(
+ key,
+ RangeByLex(
+ start=InfBound.POS_INF, end=LexBoundary("c", is_inclusive=False)
+ ),
+ )
+ == []
+ ) # start is greater than end
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrange_different_types_of_keys(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+
+ assert glide_sync_client.zrange("non_existing_key", RangeByIndex(0, 1)) == []
+
+ assert (
+ glide_sync_client.zrange_withscores(
+ "non_existing_key", RangeByIndex(0, -1)
+ )
+ ) == {}
+
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrange(key, RangeByIndex(0, 1))
+
+ with pytest.raises(RequestError):
+ glide_sync_client.zrange_withscores(key, RangeByIndex(0, 1))
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrangestore_by_index(self, glide_sync_client: GlideSync):
+ destination = f"{{testKey}}:{get_random_string(10)}"
+ source = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+
+ member_scores: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+ assert glide_sync_client.zadd(source, member_scores) == 3
+
+ # full range
+ assert (
+ glide_sync_client.zrangestore(destination, source, RangeByIndex(0, -1))
+ == 3
+ )
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"one": 1.0, "two": 2.0, "three": 3.0}) is True
+
+ # range from rank 0 to 1, from highest to lowest score
+ assert (
+ glide_sync_client.zrangestore(
+ destination, source, RangeByIndex(0, 1), True
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"two": 2.0, "three": 3.0}) is True
+
+ # incorrect range, as start > end
+ assert (
+ glide_sync_client.zrangestore(destination, source, RangeByIndex(3, 1)) == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # non-existing source
+ assert (
+ glide_sync_client.zrangestore(
+ destination, non_existing_key, RangeByIndex(0, -1)
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrangestore(destination, string_key, RangeByIndex(0, -1))
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrangestore_by_score(self, glide_sync_client: GlideSync):
+ destination = f"{{testKey}}:{get_random_string(10)}"
+ source = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+
+ member_scores: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+ assert glide_sync_client.zadd(source, member_scores) == 3
+
+ # range from negative infinity to 3 (exclusive)
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByScore(InfBound.NEG_INF, ScoreBoundary(3, False)),
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"one": 1.0, "two": 2.0}) is True
+
+ # range from 1 (inclusive) to positive infinity
+ assert (
+ glide_sync_client.zrangestore(
+ destination, source, RangeByScore(ScoreBoundary(1), InfBound.POS_INF)
+ )
+ == 3
+ )
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"one": 1.0, "two": 2.0, "three": 3.0}) is True
+
+ # range from negative to positive infinity, limited to ranks 1 to 2
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByScore(InfBound.NEG_INF, InfBound.POS_INF, Limit(1, 2)),
+ )
+ == 2
+ )
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"two": 2.0, "three": 3.0}) is True
+
+ # range from positive to negative infinity reversed, limited to ranks 1 to 2
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByScore(InfBound.POS_INF, InfBound.NEG_INF, Limit(1, 2)),
+ True,
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"one": 1.0, "two": 2.0}) is True
+
+ # incorrect range as start > end
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByScore(ScoreBoundary(3, False), InfBound.NEG_INF),
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # non-existing source
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ non_existing_key,
+ RangeByScore(InfBound.NEG_INF, ScoreBoundary(3, False)),
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrangestore(
+ destination,
+ string_key,
+ RangeByScore(ScoreBoundary(0), ScoreBoundary(3)),
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrangestore_by_lex(self, glide_sync_client: GlideSync):
+ destination = f"{{testKey}}:{get_random_string(10)}"
+ source = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ member_scores: Mapping[TEncodable, float] = {"a": 1.0, "b": 2.0, "c": 3.0}
+ assert glide_sync_client.zadd(source, member_scores) == 3
+
+ # range from negative infinity to "c" (exclusive)
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByLex(InfBound.NEG_INF, LexBoundary("c", False)),
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"a": 1.0, "b": 2.0}) is True
+
+ # range from "a" (inclusive) to positive infinity
+ assert (
+ glide_sync_client.zrangestore(
+ destination, source, RangeByLex(LexBoundary("a"), InfBound.POS_INF)
+ )
+ == 3
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"a": 1.0, "b": 2.0, "c": 3.0}) is True
+
+ # range from negative to positive infinity, limited to ranks 1 to 2
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByLex(InfBound.NEG_INF, InfBound.POS_INF, Limit(1, 2)),
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"b": 2.0, "c": 3.0}) is True
+
+ # range from positive to negative infinity reversed, limited to ranks 1 to 2
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByLex(InfBound.POS_INF, InfBound.NEG_INF, Limit(1, 2)),
+ True,
+ )
+ == 2
+ )
+
+ zrange_res = glide_sync_client.zrange_withscores(
+ destination, RangeByIndex(0, -1)
+ )
+ assert compare_maps(zrange_res, {"a": 1.0, "b": 2.0}) is True
+
+ # incorrect range as start > end
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ source,
+ RangeByLex(LexBoundary("c", False), InfBound.NEG_INF),
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # non-existing source
+ assert (
+ glide_sync_client.zrangestore(
+ destination,
+ non_existing_key,
+ RangeByLex(InfBound.NEG_INF, InfBound.POS_INF),
+ )
+ == 0
+ )
+ assert (
+ glide_sync_client.zrange_withscores(destination, RangeByIndex(0, -1)) == {}
+ )
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrangestore(
+ destination, string_key, RangeByLex(InfBound.NEG_INF, InfBound.POS_INF)
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrank(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ members_scores: Mapping[TEncodable, float] = {"one": 1.5, "two": 2, "three": 3}
+ assert glide_sync_client.zadd(key, members_scores) == 3
+ assert glide_sync_client.zrank(key, "one") == 0
+ if not check_if_server_version_lt(glide_sync_client, "7.2.0"):
+ assert glide_sync_client.zrank_withscore(key, "one") == [0, 1.5]
+ assert glide_sync_client.zrank_withscore(key, "non_existing_field") is None
+ assert (
+ glide_sync_client.zrank_withscore("non_existing_key", "field") is None
+ )
+
+ assert glide_sync_client.zrank(key, "non_existing_field") is None
+ assert glide_sync_client.zrank("non_existing_key", "field") is None
+
+ assert glide_sync_client.set(key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrank(key, "one")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrevrank(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ string_key = get_random_string(10)
+ member_scores: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+
+ assert glide_sync_client.zadd(key, member_scores) == 3
+ assert glide_sync_client.zrevrank(key, "three") == 0
+ assert glide_sync_client.zrevrank(key, "non_existing_member") is None
+ assert (
+ glide_sync_client.zrevrank(non_existing_key, "non_existing_member") is None
+ )
+
+ if not check_if_server_version_lt(glide_sync_client, "7.2.0"):
+ assert glide_sync_client.zrevrank_withscore(key, "one") == [2, 1.0]
+ assert (
+ glide_sync_client.zrevrank_withscore(key, "non_existing_member")
+ is None
+ )
+ assert (
+ glide_sync_client.zrevrank_withscore(
+ non_existing_key, "non_existing_member"
+ )
+ is None
+ )
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrevrank(string_key, "member")
+ with pytest.raises(RequestError):
+ glide_sync_client.zrevrank_withscore(string_key, "member")
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zdiff(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ key3 = f"{{testKey}}:3-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ member_scores1: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+ member_scores2: Mapping[TEncodable, float] = {"two": 2.0}
+ member_scores3: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ "four": 4.0,
+ }
+
+ assert glide_sync_client.zadd(key1, member_scores1) == 3
+ assert glide_sync_client.zadd(key2, member_scores2) == 1
+ assert glide_sync_client.zadd(key3, member_scores3) == 4
+
+ assert glide_sync_client.zdiff([key1, key2]) == [b"one", b"three"]
+ assert glide_sync_client.zdiff([key1, key3]) == []
+ assert glide_sync_client.zdiff([non_existing_key, key3]) == []
+
+ zdiff_map = glide_sync_client.zdiff_withscores([key1, key2])
+ expected_map = {
+ b"one": 1.0,
+ b"three": 3.0,
+ }
+ assert compare_maps(zdiff_map, expected_map) is True
+ assert (
+ compare_maps(glide_sync_client.zdiff_withscores([key1, key3]), {}) is True # type: ignore
+ )
+ non_exist_res = glide_sync_client.zdiff_withscores([non_existing_key, key3])
+ assert non_exist_res == {}
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.zdiff([])
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.zdiff_withscores([])
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zdiff([string_key, key2])
+
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zdiff_withscores([string_key, key2])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zdiffstore(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ key3 = f"{{testKey}}:3-{get_random_string(10)}"
+ key4 = f"{{testKey}}:4-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ member_scores1: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+ member_scores2: Mapping[TEncodable, float] = {"two": 2.0}
+ member_scores3: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ "four": 4.0,
+ }
+
+ assert glide_sync_client.zadd(key1, member_scores1) == 3
+ assert glide_sync_client.zadd(key2, member_scores2) == 1
+ assert glide_sync_client.zadd(key3, member_scores3) == 4
+
+ assert glide_sync_client.zdiffstore(key4, [key1, key2]) == 2
+
+ zrange_res = glide_sync_client.zrange_withscores(key4, RangeByIndex(0, -1))
+ assert compare_maps(zrange_res, {"one": 1.0, "three": 3.0}) is True
+
+ assert glide_sync_client.zdiffstore(key4, [key3, key2, key1]) == 1
+ assert glide_sync_client.zrange_withscores(key4, RangeByIndex(0, -1)) == {
+ b"four": 4.0
+ }
+
+ assert glide_sync_client.zdiffstore(key4, [key1, key3]) == 0
+ assert glide_sync_client.zrange_withscores(key4, RangeByIndex(0, -1)) == {}
+
+ assert glide_sync_client.zdiffstore(key4, [non_existing_key, key1]) == 0
+ assert glide_sync_client.zrange_withscores(key4, RangeByIndex(0, -1)) == {}
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zdiffstore(key4, [string_key, key1])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bzmpop(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ non_existing_key = f"{{test}}-non_existing_key"
+ string_key = f"{{test}}-3-f{get_random_string(10)}"
+
+ assert (
+ glide_sync_client.zadd(
+ key1, cast(Mapping[TEncodable, float], {"a1": 1, "b1": 2})
+ )
+ == 2
+ )
+ assert (
+ glide_sync_client.zadd(
+ key2, cast(Mapping[TEncodable, float], {"a2": 0.1, "b2": 0.2})
+ )
+ == 2
+ )
+
+ assert glide_sync_client.bzmpop([key1, key2], ScoreFilter.MAX, 0.1) == [
+ key1.encode(),
+ {b"b1": 2},
+ ]
+ assert glide_sync_client.bzmpop([key2, key1], ScoreFilter.MAX, 0.1, 10) == [
+ key2.encode(),
+ {b"b2": 0.2, b"a2": 0.1},
+ ]
+
+ # ensure that command doesn't time out even if timeout > request timeout (250ms by default)
+ assert (
+ glide_sync_client.bzmpop([non_existing_key], ScoreFilter.MIN, 0.5) is None
+ )
+ assert (
+ glide_sync_client.bzmpop([non_existing_key], ScoreFilter.MIN, 0.55, 1)
+ is None
+ )
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.bzmpop([string_key], ScoreFilter.MAX, 0.1)
+ with pytest.raises(RequestError):
+ glide_sync_client.bzmpop([string_key], ScoreFilter.MAX, 0.1, 1)
+
+ # incorrect argument: key list should not be empty
+ with pytest.raises(RequestError):
+ assert glide_sync_client.bzmpop([], ScoreFilter.MAX, 0.1, 1)
+
+ # incorrect argument: count should be greater than 0
+ with pytest.raises(RequestError):
+ assert glide_sync_client.bzmpop([key1], ScoreFilter.MAX, 0.1, 0)
+
+ # check that order of entries in the response is preserved
+ entries: Dict[TEncodable, float] = {}
+ for i in range(0, 10):
+ entries.update({f"a{i}": float(i)})
+
+ assert glide_sync_client.zadd(key2, entries) == 10
+ result = glide_sync_client.bzmpop([key2], ScoreFilter.MIN, 0.1, 10)
+ assert result is not None
+ result_map = cast(Mapping[bytes, float], result[1])
+ assert compare_maps(entries, result_map) is True # type: ignore
+
+ def endless_bzmpop_call():
+ glide_sync_client.bzmpop(["non_existent_key"], ScoreFilter.MAX, 0)
+
+ # bzmpop is called against a non-existing key with no timeout, but we wrap the call in an asyncio timeout to
+ # avoid having the test block forever
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_bzmpop_call(), timeout=0.5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrandmember(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ scores: Mapping[TEncodable, float] = {"one": 1, "two": 2}
+ assert glide_sync_client.zadd(key, scores) == 2
+
+ member = glide_sync_client.zrandmember(key)
+ # TODO: remove when functions API is fixed
+ assert isinstance(member, bytes)
+ assert member.decode() in scores
+ assert glide_sync_client.zrandmember("non_existing_key") is None
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrandmember(string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrandmember_count(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ scores: Mapping[TEncodable, float] = {"one": 1, "two": 2}
+ assert glide_sync_client.zadd(key, scores) == 2
+
+ # unique values are expected as count is positive
+ members = glide_sync_client.zrandmember_count(key, 4)
+ assert len(members) == 2
+ assert set(members) == {b"one", b"two"}
+
+ # duplicate values are expected as count is negative
+ members = glide_sync_client.zrandmember_count(key, -4)
+ assert len(members) == 4
+ for member in members:
+ # TODO: remove when functions API is fixed
+ assert isinstance(member, bytes)
+ assert member.decode() in scores
+
+ assert glide_sync_client.zrandmember_count(key, 0) == []
+ assert glide_sync_client.zrandmember_count("non_existing_key", 0) == []
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrandmember_count(string_key, 5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zrandmember_withscores(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ scores: Mapping[TEncodable, float] = {"one": 1, "two": 2}
+ assert glide_sync_client.zadd(key, scores) == 2
+
+ # unique values are expected as count is positive
+ elements = glide_sync_client.zrandmember_withscores(key, 4)
+ assert len(elements) == 2
+
+ for member, score in elements:
+ # TODO: remove when functions API is fixed
+ assert isinstance(member, bytes)
+ assert scores[(member).decode()] == score
+
+ # duplicate values are expected as count is negative
+ elements = glide_sync_client.zrandmember_withscores(key, -4)
+ assert len(elements) == 4
+ for member, score in elements:
+ # TODO: remove when functions API is fixed
+ assert isinstance(member, bytes)
+ assert scores[(member).decode()] == score
+
+ assert glide_sync_client.zrandmember_withscores(key, 0) == []
+ assert glide_sync_client.zrandmember_withscores("non_existing_key", 0) == []
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zrandmember_withscores(string_key, 5)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zintercard(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ member_scores1: Mapping[TEncodable, float] = {
+ "one": 1.0,
+ "two": 2.0,
+ "three": 3.0,
+ }
+ member_scores2: Mapping[TEncodable, float] = {
+ "two": 2.0,
+ "three": 3.0,
+ "four": 4.0,
+ }
+
+ assert glide_sync_client.zadd(key1, member_scores1) == 3
+ assert glide_sync_client.zadd(key2, member_scores2) == 3
+
+ assert glide_sync_client.zintercard([key1, key2]) == 2
+ assert glide_sync_client.zintercard([key1, non_existing_key]) == 0
+
+ assert glide_sync_client.zintercard([key1, key2], 0) == 2
+ assert glide_sync_client.zintercard([key1, key2], 1) == 1
+ assert glide_sync_client.zintercard([key1, key2], 3) == 2
+
+ # invalid argument - key list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.zintercard([])
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zintercard([string_key])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zmpop(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{test}}-1-f{get_random_string(10)}"
+ key2 = f"{{test}}-2-f{get_random_string(10)}"
+ non_existing_key = f"{{test}}-non_existing_key"
+ string_key = f"{{test}}-3-f{get_random_string(10)}"
+
+ assert glide_sync_client.zadd(key1, {"a1": 1, "b1": 2}) == 2
+ assert glide_sync_client.zadd(key2, {"a2": 0.1, "b2": 0.2}) == 2
+
+ assert glide_sync_client.zmpop([key1, key2], ScoreFilter.MAX) == [
+ key1.encode(),
+ {b"b1": 2},
+ ]
+ assert glide_sync_client.zmpop([key2, key1], ScoreFilter.MAX, 10) == [
+ key2.encode(),
+ {b"b2": 0.2, b"a2": 0.1},
+ ]
+
+ assert glide_sync_client.zmpop([non_existing_key], ScoreFilter.MIN) is None
+ assert glide_sync_client.zmpop([non_existing_key], ScoreFilter.MIN, 1) is None
+
+ # key exists, but it is not a sorted set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zmpop([string_key], ScoreFilter.MAX)
+ with pytest.raises(RequestError):
+ glide_sync_client.zmpop([string_key], ScoreFilter.MAX, 1)
+
+ # incorrect argument: key list should not be empty
+ with pytest.raises(RequestError):
+ assert glide_sync_client.zmpop([], ScoreFilter.MAX, 1)
+
+ # incorrect argument: count should be greater than 0
+ with pytest.raises(RequestError):
+ assert glide_sync_client.zmpop([key1], ScoreFilter.MAX, 0)
+
+ # check that order of entries in the response is preserved
+ entries: Dict[TEncodable, float] = {}
+ for i in range(0, 10):
+ entries[f"a{i}"] = float(i)
+
+ assert glide_sync_client.zadd(key2, entries) == 10
+ result = glide_sync_client.zmpop([key2], ScoreFilter.MIN, 10)
+ assert result is not None
+ result_map = cast(Mapping[bytes, float], result[1])
+ assert compare_maps(entries, result_map) is True # type: ignore
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_type(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ assert glide_sync_client.set(key, "value") == OK
+ assert (glide_sync_client.type(key)).lower() == b"string"
+ assert glide_sync_client.delete([key]) == 1
+
+ assert glide_sync_client.set(key.encode(), "value") == OK
+ assert (glide_sync_client.type(key.encode())).lower() == b"string"
+ assert glide_sync_client.delete([key.encode()]) == 1
+
+ assert glide_sync_client.lpush(key, ["value"]) == 1
+ assert (glide_sync_client.type(key)).lower() == b"list"
+ assert glide_sync_client.delete([key]) == 1
+
+ assert glide_sync_client.sadd(key, ["value"]) == 1
+ assert (glide_sync_client.type(key)).lower() == b"set"
+ assert glide_sync_client.delete([key]) == 1
+
+ assert glide_sync_client.zadd(key, {"member": 1.0}) == 1
+ assert (glide_sync_client.type(key)).lower() == b"zset"
+ assert glide_sync_client.delete([key]) == 1
+
+ assert glide_sync_client.hset(key, {"field": "value"}) == 1
+ assert (glide_sync_client.type(key)).lower() == b"hash"
+ assert glide_sync_client.delete([key]) == 1
+
+ glide_sync_client.xadd(key, [("field", "value")])
+ assert glide_sync_client.type(key) == b"stream"
+ assert glide_sync_client.delete([key]) == 1
+
+ assert (glide_sync_client.type(key)).lower() == b"none"
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sort_and_sort_store_with_get_or_by_args(
+ self, glide_sync_client: GlideSync
+ ):
+ if isinstance(
+ glide_sync_client, GlideClusterClient
+ ) and check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ return pytest.mark.skip(
+ reason=f"Valkey version required in cluster mode>= 8.0.0"
+ )
+ key = "{user}" + get_random_string(10)
+ store = "{user}" + get_random_string(10)
+ user_key1, user_key2, user_key3, user_key4, user_key5 = (
+ "{user}:1",
+ "{user}:2",
+ "{user}:3",
+ "{user}:4",
+ "{user}:5",
+ )
+
+ # Prepare some data. Some keys and values randomaly encoded
+ assert glide_sync_client.hset(user_key1, {"name": "Alice", "age": "30"}) == 2
+ assert (
+ glide_sync_client.hset(user_key2.encode(), {"name": "Bob", "age": "25"})
+ == 2
+ )
+ assert glide_sync_client.hset(user_key3, {"name": "Charlie", "age": "35"}) == 2
+ assert (
+ glide_sync_client.hset(user_key4, {"name": "Dave", "age".encode(): "20"})
+ == 2
+ )
+ assert (
+ glide_sync_client.hset(user_key5, {"name": "Eve", "age": "40".encode()})
+ == 2
+ )
+ assert glide_sync_client.lpush("{user}_ids", ["5", "4", "3", "2", "1"]) == 5
+
+ # SORT_RO Available since: 7.0.0
+ skip_sort_ro_test = False
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ skip_sort_ro_test = True
+
+ # Test sort with all arguments
+ assert glide_sync_client.lpush(key, ["3", "1", "2"]) == 3
+ result = glide_sync_client.sort(
+ key,
+ limit=Limit(0, 2),
+ get_patterns=["{user}:*->name"],
+ order=OrderBy.ASC,
+ alpha=True,
+ )
+ assert result == [b"Alice", b"Bob"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ key,
+ limit=Limit(0, 2),
+ get_patterns=[b"{user}:*->name"],
+ order=OrderBy.ASC,
+ alpha=True,
+ )
+ assert result_ro == [b"Alice", b"Bob"]
+
+ # Test sort_store with all arguments
+ sort_store_result = glide_sync_client.sort_store(
+ key,
+ store,
+ limit=Limit(0, 2),
+ get_patterns=["{user}:*->name"],
+ order=OrderBy.ASC,
+ alpha=True,
+ )
+ assert sort_store_result == 2
+ sorted_list = glide_sync_client.lrange(store, 0, -1)
+ assert sorted_list == [b"Alice", b"Bob"]
+
+ # Test sort with `by` argument
+ result = glide_sync_client.sort(
+ "{user}_ids",
+ by_pattern="{user}:*->age",
+ get_patterns=["{user}:*->name"],
+ alpha=True,
+ )
+ assert result == [b"Dave", b"Bob", b"Alice", b"Charlie", b"Eve"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ "{user}_ids",
+ by_pattern=b"{user}:*->age",
+ get_patterns=["{user}:*->name"],
+ alpha=True,
+ )
+ assert result_ro == [b"Dave", b"Bob", b"Alice", b"Charlie", b"Eve"]
+
+ # Test sort with `by` argument with missing keys to sort by
+ assert glide_sync_client.lpush("{user}_ids", ["a"]) == 6
+ result = glide_sync_client.sort(
+ "{user}_ids",
+ by_pattern="{user}:*->age",
+ get_patterns=["{user}:*->name"],
+ alpha=True,
+ )
+ assert result == convert_string_to_bytes_object(
+ [None, "Dave", "Bob", "Alice", "Charlie", "Eve"]
+ )
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ "{user}_ids",
+ by_pattern="{user}:*->age",
+ get_patterns=["{user}:*->name"],
+ alpha=True,
+ )
+ assert result_ro == [None, b"Dave", b"Bob", b"Alice", b"Charlie", b"Eve"]
+
+ # Test sort with `by` argument with missing keys to sort by
+ result = glide_sync_client.sort(
+ "{user}_ids",
+ by_pattern="{user}:*->name",
+ get_patterns=["{user}:*->age"],
+ alpha=True,
+ )
+ assert result == convert_string_to_bytes_object(
+ [None, "30", "25", "35", "20", "40"]
+ )
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ "{user}_ids",
+ by_pattern="{user}:*->name",
+ get_patterns=["{user}:*->age"],
+ alpha=True,
+ )
+ assert result_ro == [None, b"30", b"25", b"35", b"20", b"40"]
+
+ # Test Limit with count 0
+ result = glide_sync_client.sort(
+ "{user}_ids",
+ limit=Limit(0, 0),
+ alpha=True,
+ )
+ assert result == []
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ "{user}_ids",
+ limit=Limit(0, 0),
+ alpha=True,
+ )
+ assert result_ro == []
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sort_and_sort_store_without_get_or_by_args(
+ self, glide_sync_client: GlideSync
+ ):
+ key = "{SameSlotKey}" + get_random_string(10)
+ store = "{SameSlotKey}" + get_random_string(10)
+
+ # SORT_RO Available since: 7.0.0
+ skip_sort_ro_test = False
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ skip_sort_ro_test = True
+
+ # Test sort with non-existing key
+ result = glide_sync_client.sort("non_existing_key")
+ assert result == []
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(b"non_existing_key")
+ assert result_ro == []
+
+ # Test sort_store with non-existing key
+ sort_store_result = glide_sync_client.sort_store(
+ "{SameSlotKey}:non_existing_key", store
+ )
+ assert sort_store_result == 0
+
+ # Test each argument separately
+ assert glide_sync_client.lpush(key, ["5", "2", "4", "1", "3"]) == 5
+
+ # Test w/o flags
+ result = glide_sync_client.sort(key)
+ assert result == [b"1", b"2", b"3", b"4", b"5"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(key)
+ assert result_ro == [b"1", b"2", b"3", b"4", b"5"]
+
+ # limit argument
+ result = glide_sync_client.sort(key, limit=Limit(1, 3))
+ assert result == [b"2", b"3", b"4"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(key, limit=Limit(1, 3))
+ assert result_ro == [b"2", b"3", b"4"]
+
+ # order argument
+ result = glide_sync_client.sort(key, order=OrderBy.DESC)
+ assert result == [b"5", b"4", b"3", b"2", b"1"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(key, order=OrderBy.DESC)
+ assert result_ro == [b"5", b"4", b"3", b"2", b"1"]
+
+ assert glide_sync_client.lpush(key, ["a"]) == 6
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.sort(key)
+ assert "can't be converted into double" in str(e).lower()
+
+ if not skip_sort_ro_test:
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.sort_ro(key)
+ assert "can't be converted into double" in str(e).lower()
+
+ # alpha argument
+ result = glide_sync_client.sort(key, alpha=True)
+ assert result == [b"1", b"2", b"3", b"4", b"5", b"a"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(key, alpha=True)
+ assert result_ro == [b"1", b"2", b"3", b"4", b"5", b"a"]
+
+ # Combining multiple arguments
+ result = glide_sync_client.sort(
+ key, limit=Limit(1, 3), order=OrderBy.DESC, alpha=True
+ )
+ assert result == [b"5", b"4", b"3"]
+
+ if not skip_sort_ro_test:
+ result_ro = glide_sync_client.sort_ro(
+ key, limit=Limit(1, 3), order=OrderBy.DESC, alpha=True
+ )
+ assert result_ro == [b"5", b"4", b"3"]
+
+ # Test sort_store with combined arguments
+ sort_store_result = glide_sync_client.sort_store(
+ key, store, limit=Limit(1, 3), order=OrderBy.DESC, alpha=True
+ )
+ assert sort_store_result == 3
+ sorted_list = glide_sync_client.lrange(store, 0, -1)
+ assert sorted_list == [b"5", b"4", b"3"]
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_echo(self, glide_sync_client: GlideSync):
+ message = get_random_string(5)
+ assert glide_sync_client.echo(message) == message.encode()
+ if isinstance(glide_sync_client, GlideClusterClient):
+ echo_dict = glide_sync_client.echo(message, AllNodes())
+ assert isinstance(echo_dict, dict)
+ for value in echo_dict.values():
+ assert value == message.encode()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_dbsize(self, glide_sync_client: GlideSync):
+ assert glide_sync_client.custom_command(["FLUSHALL"]) == OK
+
+ assert glide_sync_client.dbsize() == 0
+ key_value_pairs = [(get_random_string(10), "foo") for _ in range(10)]
+
+ for key, value in key_value_pairs:
+ assert glide_sync_client.set(key, value) == OK
+ assert glide_sync_client.dbsize() == 10
+
+ if isinstance(glide_sync_client, GlideClusterClient):
+ assert glide_sync_client.custom_command(["FLUSHALL"]) == OK
+ key = get_random_string(5)
+ assert glide_sync_client.set(key, value) == OK
+ assert glide_sync_client.dbsize(SlotKeyRoute(SlotType.PRIMARY, key)) == 1
+ else:
+ assert glide_sync_client.select(1) == OK
+ assert glide_sync_client.dbsize() == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_time(self, glide_sync_client: GlideSync):
+ current_time = int(time.time()) - 1
+ result = glide_sync_client.time()
+ assert len(result) == 2
+ assert isinstance(result, list)
+ assert int(result[0]) > current_time
+ assert 0 < int(result[1]) < 1000000
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lastsave(self, glide_sync_client: GlideSync):
+ yesterday = date.today() - timedelta(1)
+ yesterday_unix_time = time.mktime(yesterday.timetuple())
+
+ result = glide_sync_client.lastsave()
+ assert isinstance(result, int)
+ assert result > yesterday_unix_time
+
+ if isinstance(glide_sync_client, GlideClusterClient):
+ # test with single-node route
+ result = glide_sync_client.lastsave(RandomNode())
+ assert isinstance(result, int)
+ assert result > yesterday_unix_time
+
+ # test with multi-node route
+ result = glide_sync_client.lastsave(AllNodes())
+ assert isinstance(result, dict)
+ for lastsave_time in result.values():
+ assert lastsave_time > yesterday_unix_time
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_append(self, glide_sync_client: GlideSync):
+ key, value = get_random_string(10), get_random_string(5)
+ assert glide_sync_client.append(key, value) == 5
+
+ assert glide_sync_client.append(key, value) == 10
+ assert glide_sync_client.get(key) == (value * 2).encode()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xadd_xtrim_xlen(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ field, field2 = get_random_string(10), get_random_string(10)
+
+ assert (
+ glide_sync_client.xadd(
+ key,
+ [(field, "foo"), (field2, "bar")],
+ StreamAddOptions(make_stream=False),
+ )
+ is None
+ )
+
+ assert (
+ glide_sync_client.xadd(
+ key, [(field, "foo1"), (field2, "bar1")], StreamAddOptions(id="0-1")
+ )
+ == b"0-1"
+ )
+
+ assert (
+ glide_sync_client.xadd(key, [(field, "foo2"), (field2, "bar2")])
+ ) is not None
+ assert glide_sync_client.xlen(key) == 2
+
+ # This will trim the first entry.
+ id = glide_sync_client.xadd(
+ key,
+ [(field, "foo3"), (field2, "bar3")],
+ StreamAddOptions(trim=TrimByMaxLen(exact=True, threshold=2)),
+ )
+
+ assert id is not None
+ # TODO: remove when functions API is fixed
+ assert isinstance(id, bytes)
+ assert glide_sync_client.xlen(key) == 2
+
+ # This will trim the 2nd entry.
+ assert (
+ glide_sync_client.xadd(
+ key,
+ [(field, "foo4"), (field2, "bar4")],
+ StreamAddOptions(trim=TrimByMinId(exact=True, threshold=id.decode())),
+ )
+ is not None
+ )
+ assert glide_sync_client.xlen(key) == 2
+
+ assert glide_sync_client.xtrim(key, TrimByMaxLen(threshold=1, exact=True)) == 1
+ assert glide_sync_client.xlen(key) == 1
+
+ assert glide_sync_client.xtrim(key, TrimByMaxLen(threshold=0, exact=True)) == 1
+ # Unlike other Valkey collection types, stream keys still exist even after removing all entries
+ assert glide_sync_client.exists([key]) == 1
+ assert glide_sync_client.xlen(key) == 0
+
+ assert (
+ glide_sync_client.xtrim(
+ non_existing_key, TrimByMaxLen(threshold=1, exact=True)
+ )
+ == 0
+ )
+ assert glide_sync_client.xlen(non_existing_key) == 0
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo")
+ with pytest.raises(RequestError):
+ glide_sync_client.xtrim(string_key, TrimByMaxLen(threshold=1, exact=True))
+ with pytest.raises(RequestError):
+ glide_sync_client.xlen(string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xdel(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ stream_id1 = "0-1"
+ stream_id2 = "0-2"
+ stream_id3 = "0-3"
+
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1", "foo1"), ("f2", "foo2")], StreamAddOptions(stream_id1)
+ )
+ == stream_id1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1", "foo1"), ("f2", "foo2")], StreamAddOptions(stream_id2)
+ )
+ == stream_id2.encode()
+ )
+ assert glide_sync_client.xlen(key1) == 2
+
+ # deletes one stream id, and ignores anything invalid
+ assert glide_sync_client.xdel(key1, [stream_id1, stream_id3]) == 1
+ assert glide_sync_client.xdel(non_existing_key, [stream_id3]) == 0
+
+ # invalid argument - id list should not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.xdel(key1, [])
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xdel(string_key, [stream_id3])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xrange_and_xrevrange(self, glide_sync_client: GlideSync):
+ # XRANGE/XREVRANGE coverage: full range, reversed bounds, exclusive bounds
+ # with a count limit, emptied and non-existing streams, non-positive counts,
+ # wrong key type, and invalid ID bounds.
+ # NOTE(review): cluster_mode/protocol are not function args here (unlike the
+ # sibling tests); presumably they are consumed indirectly by the
+ # glide_sync_client fixture - confirm.
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ string_key = get_random_string(10)
+ stream_id1 = "0-1"
+ stream_id2 = "0-2"
+ stream_id3 = "0-3"
+
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1", "v1")], StreamAddOptions(id=stream_id1)
+ )
+ == stream_id1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f2", "v2")], StreamAddOptions(id=stream_id2)
+ )
+ == stream_id2.encode()
+ )
+ assert glide_sync_client.xlen(key) == 2
+
+ # get everything from the stream
+ result = glide_sync_client.xrange(key, MinId(), MaxId())
+ assert convert_bytes_to_string_object(result) == {
+ stream_id1: [["f1", "v1"]],
+ stream_id2: [["f2", "v2"]],
+ }
+ result = glide_sync_client.xrevrange(key, MaxId(), MinId())
+ assert convert_bytes_to_string_object(result) == {
+ stream_id2: [["f2", "v2"]],
+ stream_id1: [["f1", "v1"]],
+ }
+
+ # returns empty mapping if + before -
+ assert glide_sync_client.xrange(key, MaxId(), MinId()) == {}
+ # rev search returns empty mapping if - before +
+ assert glide_sync_client.xrevrange(key, MinId(), MaxId()) == {}
+
+ assert (
+ glide_sync_client.xadd(
+ key, [("f3", "v3")], StreamAddOptions(id=stream_id3)
+ )
+ == stream_id3.encode()
+ )
+
+ # get the newest entry
+ result = glide_sync_client.xrange(
+ key, ExclusiveIdBound(stream_id2), ExclusiveIdBound.from_timestamp(5), 1
+ )
+ assert convert_bytes_to_string_object(result) == {stream_id3: [["f3", "v3"]]}
+ result = glide_sync_client.xrevrange(
+ key, ExclusiveIdBound.from_timestamp(5), ExclusiveIdBound(stream_id2), 1
+ )
+ assert convert_bytes_to_string_object(result) == {stream_id3: [["f3", "v3"]]}
+
+ # xrange/xrevrange against an emptied stream
+ assert glide_sync_client.xdel(key, [stream_id1, stream_id2, stream_id3]) == 3
+ assert glide_sync_client.xrange(key, MinId(), MaxId(), 10) == {}
+ assert glide_sync_client.xrevrange(key, MaxId(), MinId(), 10) == {}
+
+ assert glide_sync_client.xrange(non_existing_key, MinId(), MaxId()) == {}
+ assert glide_sync_client.xrevrange(non_existing_key, MaxId(), MinId()) == {}
+
+ # count value < 1 returns None
+ assert glide_sync_client.xrange(key, MinId(), MaxId(), 0) is None
+ assert glide_sync_client.xrange(key, MinId(), MaxId(), -1) is None
+ assert glide_sync_client.xrevrange(key, MaxId(), MinId(), 0) is None
+ assert glide_sync_client.xrevrange(key, MaxId(), MinId(), -1) is None
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo")
+ with pytest.raises(RequestError):
+ glide_sync_client.xrange(string_key, MinId(), MaxId())
+ with pytest.raises(RequestError):
+ glide_sync_client.xrevrange(string_key, MaxId(), MinId())
+
+ # invalid start bound
+ with pytest.raises(RequestError):
+ glide_sync_client.xrange(key, IdBound("not_a_stream_id"), MaxId())
+ with pytest.raises(RequestError):
+ glide_sync_client.xrevrange(key, MaxId(), IdBound("not_a_stream_id"))
+
+ # invalid end bound
+ with pytest.raises(RequestError):
+ glide_sync_client.xrange(key, MinId(), IdBound("not_a_stream_id"))
+ with pytest.raises(RequestError):
+ glide_sync_client.xrevrange(key, IdBound("not_a_stream_id"), MinId())
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xread(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ # XREAD happy path: returns only entries newer than the given ID per stream
+ # key, returns None for unknown keys or up-to-date IDs, and honors the
+ # COUNT/BLOCK read options. Keys share a hash tag so they land on one slot
+ # in cluster mode.
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:3-{get_random_string(10)}"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+ stream_id1_3 = "1-3"
+ stream_id2_1 = "2-1"
+ stream_id2_2 = "2-2"
+ stream_id2_3 = "2-3"
+ non_existing_id = "99-99"
+
+ # setup first entries in streams key1 and key2
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1_1", "v1_1")], StreamAddOptions(id=stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key2, [("f2_1", "v2_1")], StreamAddOptions(id=stream_id2_1)
+ )
+ == stream_id2_1.encode()
+ )
+
+ # setup second entries in streams key1 and key2
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1_2", "v1_2")], StreamAddOptions(id=stream_id1_2)
+ )
+ == stream_id1_2.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key2, [("f2_2", "v2_2")], StreamAddOptions(id=stream_id2_2)
+ )
+ == stream_id2_2.encode()
+ )
+
+ # setup third entries in streams key1 and key2
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1_3", "v1_3")], StreamAddOptions(id=stream_id1_3)
+ )
+ == stream_id1_3.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key2, [("f2_3", "v2_3")], StreamAddOptions(id=stream_id2_3)
+ )
+ == stream_id2_3.encode()
+ )
+
+ assert glide_sync_client.xread({key1: stream_id1_1, key2: stream_id2_1}) == {
+ key1.encode(): {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ stream_id1_3.encode(): [[b"f1_3", b"v1_3"]],
+ },
+ key2.encode(): {
+ stream_id2_2.encode(): [[b"f2_2", b"v2_2"]],
+ stream_id2_3.encode(): [[b"f2_3", b"v2_3"]],
+ },
+ }
+
+ assert glide_sync_client.xread({non_existing_key: stream_id1_1}) is None
+ assert glide_sync_client.xread({key1: non_existing_id}) is None
+
+ # passing an empty read options argument has no effect
+ assert glide_sync_client.xread({key1: stream_id1_1}, StreamReadOptions()) == {
+ key1.encode(): {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ stream_id1_3.encode(): [[b"f1_3", b"v1_3"]],
+ },
+ }
+
+ assert glide_sync_client.xread(
+ {key1: stream_id1_1}, StreamReadOptions(count=1)
+ ) == {
+ key1.encode(): {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ },
+ }
+ assert glide_sync_client.xread(
+ {key1: stream_id1_1}, StreamReadOptions(count=1, block_ms=1000)
+ ) == {
+ key1.encode(): {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ },
+ }
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xread_edge_cases_and_failures(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ string_key = f"{{testKey}}:2-{get_random_string(10)}"
+ stream_id0 = "0-0"
+ stream_id1 = "1-1"
+ stream_id2 = "1-2"
+
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f1", "v1")], StreamAddOptions(id=stream_id1)
+ )
+ == stream_id1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key1, [("f2", "v2")], StreamAddOptions(id=stream_id2)
+ )
+ == stream_id2.encode()
+ )
+
+ test_sync_client = create_client(
+ request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900
+ )
+ # ensure command doesn't time out even if timeout > request timeout
+ assert (
+ test_sync_client.xread(
+ {key1: stream_id2}, StreamReadOptions(block_ms=1000)
+ )
+ is None
+ )
+
+ def endless_xread_call():
+ test_sync_client.xread({key1: stream_id2}, StreamReadOptions(block_ms=0))
+
+ # when xread is called with a block timeout of 0, it should never timeout, but we wrap the test with a timeout
+ # to avoid the test getting stuck forever.
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_xread_call(), timeout=3)
+
+ # if count is non-positive, it is ignored
+ assert glide_sync_client.xread(
+ {key1: stream_id0}, StreamReadOptions(count=0)
+ ) == {
+ key1.encode(): {
+ stream_id1.encode(): [[b"f1", b"v1"]],
+ stream_id2.encode(): [[b"f2", b"v2"]],
+ },
+ }
+ assert glide_sync_client.xread(
+ {key1: stream_id0}, StreamReadOptions(count=-1)
+ ) == {
+ key1.encode(): {
+ stream_id1.encode(): [[b"f1", b"v1"]],
+ stream_id2.encode(): [[b"f2", b"v2"]],
+ },
+ }
+
+ # invalid stream ID
+ with pytest.raises(RequestError):
+ glide_sync_client.xread({key1: "invalid_stream_id"})
+
+ # invalid argument - block cannot be negative
+ with pytest.raises(RequestError):
+ glide_sync_client.xread({key1: stream_id1}, StreamReadOptions(block_ms=-1))
+
+ # invalid argument - keys_and_ids must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.xread({})
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo")
+ with pytest.raises(RequestError):
+ glide_sync_client.xread({string_key: stream_id1, key1: stream_id1})
+ with pytest.raises(RequestError):
+ glide_sync_client.xread({key1: stream_id1, string_key: stream_id1})
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xgroup_create_xgroup_destroy(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ # XGROUP CREATE / XGROUP DESTROY: MKSTREAM auto-creation, duplicate group
+ # names, invalid stream IDs, the version-gated ENTRIESREAD option, and
+ # wrong-key-type errors.
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ string_key = get_random_string(10)
+ group_name1 = get_random_string(10)
+ group_name2 = get_random_string(10)
+ stream_id = "0-1"
+
+ # trying to create a consumer group for a non-existing stream without the "MKSTREAM" arg results in error
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create(non_existing_key, group_name1, stream_id)
+
+ # calling with the "MKSTREAM" arg should create the new stream automatically
+ assert (
+ glide_sync_client.xgroup_create(
+ key, group_name1, stream_id, StreamGroupOptions(make_stream=True)
+ )
+ == OK
+ )
+
+ # invalid arg - group names must be unique, but group_name1 already exists
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create(key, group_name1, stream_id)
+
+ # invalid stream ID format
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create(
+ key, group_name2, "invalid_stream_id_format"
+ )
+
+ assert glide_sync_client.xgroup_destroy(key, group_name1) is True
+ # calling xgroup_destroy again returns False because the group was already destroyed above
+ assert glide_sync_client.xgroup_destroy(key, group_name1) is False
+
+ # attempting to destroy a group for a non-existing key should raise an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_destroy(non_existing_key, group_name1)
+
+ # "ENTRIESREAD" option was added in Valkey 7.0.0
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create(
+ key,
+ group_name1,
+ stream_id,
+ StreamGroupOptions(entries_read=10),
+ )
+ else:
+ assert (
+ glide_sync_client.xgroup_create(
+ key,
+ group_name1,
+ stream_id,
+ StreamGroupOptions(entries_read=10),
+ )
+ == OK
+ )
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create(
+ string_key, group_name1, stream_id, StreamGroupOptions(make_stream=True)
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_destroy(string_key, group_name1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xgroup_create_consumer_xreadgroup_xgroup_del_consumer(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ # Consumer lifecycle: XGROUP CREATECONSUMER / DELCONSUMER plus XREADGROUP
+ # pending-entry semantics (">" vs explicit ID), deleted-entry placeholders,
+ # the NOACK option, and wrong-key/missing-group error cases.
+ key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ group_name = get_random_string(10)
+ consumer_name = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+ stream_id1_3 = "1-3"
+
+ # create group and consumer for the group
+ assert (
+ glide_sync_client.xgroup_create(
+ key, group_name, stream_id0, StreamGroupOptions(make_stream=True)
+ )
+ == OK
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer_name)
+ is True
+ )
+
+ # attempting to create/delete a consumer for a group that does not exist results in a NOGROUP request error
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create_consumer(
+ key, "non_existing_group", consumer_name
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_del_consumer(
+ key, "non_existing_group", consumer_name
+ )
+
+ # attempt to create consumer for group again
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer_name)
+ is False
+ )
+
+ # attempting to delete a consumer that has not been created yet returns 0
+ assert (
+ glide_sync_client.xgroup_del_consumer(
+ key, group_name, "non_existing_consumer"
+ )
+ == 0
+ )
+
+ # add two stream entries
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_0", "v1_0")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_1", "v1_1")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+
+ # read the entire stream for the consumer and mark messages as pending
+ assert glide_sync_client.xreadgroup(
+ {key: ">"},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(block_ms=1000, count=10),
+ ) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f1_0", b"v1_0"]],
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ }
+ }
+
+ # delete one of the stream entries
+ assert glide_sync_client.xdel(key, [stream_id1_0]) == 1
+
+ # now xreadgroup yields one empty stream entry and one non-empty stream entry
+ assert glide_sync_client.xreadgroup({key: "0"}, group_name, consumer_name) == {
+ key.encode(): {
+ stream_id1_0.encode(): None,
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ }
+ }
+
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_2", "v1_2")], StreamAddOptions(stream_id1_2)
+ )
+ == stream_id1_2.encode()
+ )
+
+ # delete the consumer group and expect 2 pending messages
+ assert (
+ glide_sync_client.xgroup_del_consumer(key, group_name, consumer_name) == 2
+ )
+
+ # consume the last message with the previously deleted consumer (create the consumer anew)
+ assert glide_sync_client.xreadgroup(
+ {key: ">"},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(count=5, block_ms=1000),
+ ) == {key.encode(): {stream_id1_2.encode(): [[b"f1_2", b"v1_2"]]}}
+
+ # delete the consumer group and expect the pending message
+ assert (
+ glide_sync_client.xgroup_del_consumer(key, group_name, consumer_name) == 1
+ )
+
+ # test NOACK option
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_3", "v1_3")], StreamAddOptions(stream_id1_3)
+ )
+ == stream_id1_3.encode()
+ )
+ # since NOACK is passed, stream entry will be consumed without being added to the pending entries
+ assert glide_sync_client.xreadgroup(
+ {key: ">"},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(no_ack=True, count=5, block_ms=1000),
+ ) == {key.encode(): {stream_id1_3.encode(): [[b"f1_3", b"v1_3"]]}}
+ assert (
+ glide_sync_client.xreadgroup(
+ {key: ">"},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(no_ack=False, count=5, block_ms=1000),
+ )
+ is None
+ )
+ assert glide_sync_client.xreadgroup(
+ {key: "0"},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(no_ack=False, count=5, block_ms=1000),
+ ) == {key.encode(): {}}
+
+ # attempting to call XGROUP CREATECONSUMER or XGROUP DELCONSUMER with a non-existing key should raise an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create_consumer(
+ non_existing_key, group_name, consumer_name
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_del_consumer(
+ non_existing_key, group_name, consumer_name
+ )
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_create_consumer(
+ string_key, group_name, consumer_name
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_del_consumer(
+ string_key, group_name, consumer_name
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xreadgroup_edge_cases_and_failures(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ group_name = get_random_string(10)
+ consumer_name = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+
+ # attempting to execute against a non-existing key results in an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {non_existing_key: stream_id0}, group_name, consumer_name
+ )
+
+ # create group and consumer for group
+ assert glide_sync_client.xgroup_create(
+ key, group_name, stream_id0, StreamGroupOptions(make_stream=True)
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer_name)
+ is True
+ )
+
+ # read from empty stream
+ assert (
+ glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) is None
+ )
+ assert glide_sync_client.xreadgroup({key: "0"}, group_name, consumer_name) == {
+ key.encode(): {}
+ }
+
+ # setup first entry
+ assert (
+ glide_sync_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_1))
+ == stream_id1_1.encode()
+ )
+
+ # if count is non-positive, it is ignored
+ assert glide_sync_client.xreadgroup(
+ {key: ">"}, group_name, consumer_name, StreamReadGroupOptions(count=0)
+ ) == {
+ key.encode(): {
+ stream_id1_1.encode(): [[b"f1", b"v1"]],
+ },
+ }
+ assert glide_sync_client.xreadgroup(
+ {key: stream_id1_0},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(count=-1),
+ ) == {
+ key.encode(): {
+ stream_id1_1.encode(): [[b"f1", b"v1"]],
+ },
+ }
+
+ # invalid stream ID
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {key: "invalid_stream_id"}, group_name, consumer_name
+ )
+
+ # invalid argument - block cannot be negative
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {key: stream_id0},
+ group_name,
+ consumer_name,
+ StreamReadGroupOptions(block_ms=-1),
+ )
+
+ # invalid argument - keys_and_ids must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup({}, group_name, consumer_name)
+
+ # first key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {string_key: stream_id1_1, key: stream_id1_1}, group_name, consumer_name
+ )
+
+ # second key exists, but it is not a stream
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {key: stream_id1_1, string_key: stream_id1_1}, group_name, consumer_name
+ )
+
+ # attempting to execute command with a non-existing group results in an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xreadgroup(
+ {key: stream_id1_1}, "non_existing_group", consumer_name
+ )
+
+ test_sync_client = create_client(
+ request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900
+ )
+ timeout_key = f"{{testKey}}:{get_random_string(10)}"
+ timeout_group_name = get_random_string(10)
+ timeout_consumer_name = get_random_string(10)
+
+ # create a group read with the test client
+ # add a single stream entry and consumer
+ # the first call to ">" will return and update consumer group
+ # the second call to ">" will block waiting for new entries
+ # using anything other than ">" won't block, but will return the empty consumer result
+ # see: https://github.com/redis/redis/issues/6587
+ assert (
+ test_sync_client.xgroup_create(
+ timeout_key,
+ timeout_group_name,
+ stream_id0,
+ StreamGroupOptions(make_stream=True),
+ )
+ == OK
+ )
+ assert (
+ test_sync_client.xgroup_create_consumer(
+ timeout_key, timeout_group_name, timeout_consumer_name
+ )
+ is True
+ )
+ assert (
+ test_sync_client.xadd(
+ timeout_key, [("f1", "v1")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+
+ # read the entire stream for the consumer and mark messages as pending
+ assert test_sync_client.xreadgroup(
+ {timeout_key: ">"}, timeout_group_name, timeout_consumer_name
+ ) == {timeout_key.encode(): {stream_id1_1.encode(): [[b"f1", b"v1"]]}}
+
+ # subsequent calls to read ">" will block
+ assert (
+ test_sync_client.xreadgroup(
+ {timeout_key: ">"},
+ timeout_group_name,
+ timeout_consumer_name,
+ StreamReadGroupOptions(block_ms=1000),
+ )
+ is None
+ )
+
+ # ensure that command doesn't time out even if timeout > request timeout
+ def endless_xreadgroup_call():
+ test_sync_client.xreadgroup(
+ {timeout_key: ">"},
+ timeout_group_name,
+ timeout_consumer_name,
+ StreamReadGroupOptions(block_ms=0),
+ )
+
+ # when xreadgroup is called with a block timeout of 0, it should never timeout, but we wrap the test with a
+ # timeout to avoid the test getting stuck forever.
+ with pytest.raises(asyncio.TimeoutError):
+ asyncio.wait_for(endless_xreadgroup_call(), timeout=3)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xack(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ # XACK: acknowledging pending entries removes them from the PEL, repeated
+ # acks return 0, and non-existing keys/groups/IDs are no-ops; plus
+ # empty-ID-list, invalid-ID, and wrong-key-type errors.
+ key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ group_name = get_random_string(10)
+ consumer_name = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+
+ # setup: add 2 entries to the stream, create consumer group, read to mark them as pending
+ assert (
+ glide_sync_client.xadd(key, [("f0", "v0")], StreamAddOptions(stream_id1_0))
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_1))
+ == stream_id1_1.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id0) == OK
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f0", b"v0"]],
+ stream_id1_1.encode(): [[b"f1", b"v1"]],
+ }
+ }
+
+ # add one more entry
+ assert (
+ glide_sync_client.xadd(key, [("f2", "v2")], StreamAddOptions(stream_id1_2))
+ == stream_id1_2.encode()
+ )
+
+ # acknowledge the first 2 entries
+ assert (
+ glide_sync_client.xack(key, group_name, [stream_id1_0, stream_id1_1]) == 2
+ )
+ # attempting to acknowledge the first 2 entries again returns 0 since they were already acknowledged
+ assert (
+ glide_sync_client.xack(key, group_name, [stream_id1_0, stream_id1_1]) == 0
+ )
+ # read the last, unacknowledged entry
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) == {
+ key.encode(): {stream_id1_2.encode(): [[b"f2", b"v2"]]}
+ }
+ # deleting the consumer returns 1 since the last entry still hasn't been acknowledged
+ assert (
+ glide_sync_client.xgroup_del_consumer(key, group_name, consumer_name) == 1
+ )
+
+ # attempting to acknowledge a non-existing key returns 0
+ assert (
+ glide_sync_client.xack(non_existing_key, group_name, [stream_id1_0]) == 0
+ )
+ # attempting to acknowledge a non-existing group returns 0
+ assert glide_sync_client.xack(key, "non_existing_group", [stream_id1_0]) == 0
+ # attempting to acknowledge a non-existing ID returns 0
+ assert glide_sync_client.xack(key, group_name, ["99-99"]) == 0
+
+ # invalid arg - ID list must not be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.xack(key, group_name, [])
+
+ # invalid arg - invalid stream ID format
+ with pytest.raises(RequestError):
+ glide_sync_client.xack(key, group_name, ["invalid_ID_format"])
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xack(string_key, group_name, [stream_id1_0])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xpending_xclaim(self, glide_sync_client: GlideSync):
+ # XPENDING summary/range plus XCLAIM/XCLAIM JUSTID: two consumers build a
+ # PEL, entries are claimed between consumers (including FORCE with a retry
+ # count), then acknowledged and the remaining pending ranges are checked.
+ # NOTE(review): cluster_mode/protocol are not function args here; presumably
+ # consumed indirectly by the glide_sync_client fixture - confirm.
+ key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer1 = get_random_string(10)
+ consumer2 = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+ stream_id1_3 = "1-3"
+ stream_id1_4 = "1-4"
+ stream_id1_5 = "1-5"
+
+ # create group and consumer for group
+ assert (
+ glide_sync_client.xgroup_create(
+ key, group_name, stream_id0, StreamGroupOptions(make_stream=True)
+ )
+ == OK
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer1)
+ is True
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer2)
+ is True
+ )
+
+ # add two stream entries for consumer1
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_0", "v1_0")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_1", "v1_1")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+
+ # read the entire stream with consumer1 and mark messages as pending
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer1) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f1_0", b"v1_0"]],
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ }
+ }
+
+ # add three stream entries for consumer2
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_2", "v1_2")], StreamAddOptions(stream_id1_2)
+ )
+ == stream_id1_2.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_3", "v1_3")], StreamAddOptions(stream_id1_3)
+ )
+ == stream_id1_3.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_4", "v1_4")], StreamAddOptions(stream_id1_4)
+ )
+ == stream_id1_4.encode()
+ )
+
+ # read the entire stream with consumer2 and mark messages as pending
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer2) == {
+ key.encode(): {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ stream_id1_3.encode(): [[b"f1_3", b"v1_3"]],
+ stream_id1_4.encode(): [[b"f1_4", b"v1_4"]],
+ }
+ }
+
+ # inner array order is non-deterministic, so we have to assert against it separately from the other info
+ result = glide_sync_client.xpending(key, group_name)
+ consumer_results = cast(List, result[3])
+ assert [consumer1.encode(), b"2"] in consumer_results
+ assert [consumer2.encode(), b"3"] in consumer_results
+
+ result.remove(consumer_results)
+ assert result == [5, stream_id1_0.encode(), stream_id1_4.encode()]
+
+ # to ensure an idle_time > 0
+ time.sleep(2)
+ range_result = glide_sync_client.xpending_range(
+ key, group_name, MinId(), MaxId(), 10
+ )
+ # the inner lists of the result have format [stream_entry_id, consumer, idle_time, times_delivered]
+ # because the idle time return value is not deterministic, we have to assert against it separately
+ idle_time = cast(int, range_result[0][2])
+ assert idle_time > 0
+ range_result[0].remove(idle_time)
+ assert range_result[0] == [stream_id1_0.encode(), consumer1.encode(), 1]
+
+ idle_time = cast(int, range_result[1][2])
+ assert idle_time > 0
+ range_result[1].remove(idle_time)
+ assert range_result[1] == [stream_id1_1.encode(), consumer1.encode(), 1]
+
+ idle_time = cast(int, range_result[2][2])
+ assert idle_time > 0
+ range_result[2].remove(idle_time)
+ assert range_result[2] == [stream_id1_2.encode(), consumer2.encode(), 1]
+
+ idle_time = cast(int, range_result[3][2])
+ assert idle_time > 0
+ range_result[3].remove(idle_time)
+ assert range_result[3] == [stream_id1_3.encode(), consumer2.encode(), 1]
+
+ idle_time = cast(int, range_result[4][2])
+ assert idle_time > 0
+ range_result[4].remove(idle_time)
+ assert range_result[4] == [stream_id1_4.encode(), consumer2.encode(), 1]
+
+ # use xclaim to claim stream 2 and 4 for consumer 1
+ assert glide_sync_client.xclaim(
+ key, group_name, consumer1, 0, [stream_id1_2, stream_id1_4]
+ ) == {
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ stream_id1_4.encode(): [[b"f1_4", b"v1_4"]],
+ }
+
+ # claiming non exists id
+ assert (
+ glide_sync_client.xclaim(
+ key, group_name, consumer1, 0, ["1526569498055-0"]
+ )
+ == {}
+ )
+
+ assert glide_sync_client.xclaim_just_id(
+ key, group_name, consumer1, 0, [stream_id1_2, stream_id1_4]
+ ) == [stream_id1_2.encode(), stream_id1_4.encode()]
+
+ # add one more stream
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_5", "v1_5")], StreamAddOptions(stream_id1_5)
+ )
+ == stream_id1_5.encode()
+ )
+
+ # using force, we can xclaim the message without reading it
+ claim_force_result = glide_sync_client.xclaim(
+ key,
+ group_name,
+ consumer2,
+ 0,
+ [stream_id1_5],
+ StreamClaimOptions(retry_count=99, is_force=True),
+ )
+ assert claim_force_result == {stream_id1_5.encode(): [[b"f1_5", b"v1_5"]]}
+
+ force_pending_result = glide_sync_client.xpending_range(
+ key, group_name, IdBound(stream_id1_5), IdBound(stream_id1_5), 1
+ )
+ assert force_pending_result[0][0] == stream_id1_5.encode()
+ assert force_pending_result[0][1] == consumer2.encode()
+ assert force_pending_result[0][3] == 99
+
+ # acknowledge streams 1-1, 1-2, 1-3, 1-5 and remove them from the xpending results
+ assert (
+ glide_sync_client.xack(
+ key,
+ group_name,
+ [stream_id1_1, stream_id1_2, stream_id1_3, stream_id1_5],
+ )
+ == 4
+ )
+
+ range_result = glide_sync_client.xpending_range(
+ key, group_name, IdBound(stream_id1_4), MaxId(), 10
+ )
+ assert len(range_result) == 1
+ assert range_result[0][0] == stream_id1_4.encode()
+ assert range_result[0][1] == consumer1.encode()
+
+ range_result = glide_sync_client.xpending_range(
+ key, group_name, MinId(), IdBound(stream_id1_3), 10
+ )
+ assert len(range_result) == 1
+ assert range_result[0][0] == stream_id1_0.encode()
+ assert range_result[0][1] == consumer1.encode()
+
+ # passing an empty StreamPendingOptions object should have no effect
+ range_result = glide_sync_client.xpending_range(
+ key, group_name, MinId(), IdBound(stream_id1_3), 10, StreamPendingOptions()
+ )
+ assert len(range_result) == 1
+ assert range_result[0][0] == stream_id1_0.encode()
+ assert range_result[0][1] == consumer1.encode()
+
+ range_result = glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MinId(),
+ MaxId(),
+ 10,
+ StreamPendingOptions(min_idle_time_ms=1, consumer_name=consumer1),
+ )
+ # note: stream IDs 1-0 and 1-4 (both owned by consumer1) are still pending, all others were acknowledged
+ assert len(range_result) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xpending_edge_cases_and_failures(self, glide_sync_client: GlideSync):
+ # XPENDING / XPENDING range edge cases: empty PEL, reversed bounds,
+ # min-idle-time filters, unknown consumers, invalid bounds, non-positive
+ # counts, missing groups/keys, and wrong key type.
+ # NOTE(review): cluster_mode/protocol are not function args here; presumably
+ # consumed indirectly by the glide_sync_client fixture - confirm.
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ string_key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+
+ # create group and consumer for the group
+ assert (
+ glide_sync_client.xgroup_create(
+ key, group_name, stream_id0, StreamGroupOptions(make_stream=True)
+ )
+ == OK
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer) is True
+ )
+
+ # add two stream entries for consumer
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_0", "v1_0")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_1", "v1_1")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+
+ # no pending messages yet...
+ assert glide_sync_client.xpending(key, group_name) == [0, None, None, None]
+ assert (
+ glide_sync_client.xpending_range(key, group_name, MinId(), MaxId(), 10)
+ == []
+ )
+
+ # read the entire stream with consumer and mark messages as pending
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f1_0", b"v1_0"]],
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ }
+ }
+
+ # sanity check - expect some results
+ assert glide_sync_client.xpending(key, group_name) == [
+ 2,
+ stream_id1_0.encode(),
+ stream_id1_1.encode(),
+ [[consumer.encode(), b"2"]],
+ ]
+ result = glide_sync_client.xpending_range(
+ key, group_name, MinId(), MaxId(), 10
+ )
+ assert len(result[0]) > 0
+
+ # returns empty if + before -
+ assert (
+ glide_sync_client.xpending_range(key, group_name, MaxId(), MinId(), 10)
+ == []
+ )
+ assert (
+ glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MaxId(),
+ MinId(),
+ 10,
+ StreamPendingOptions(consumer_name=consumer),
+ )
+ == []
+ )
+
+ # min idle time of 100 seconds shouldn't produce any results
+ assert (
+ glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MinId(),
+ MaxId(),
+ 10,
+ StreamPendingOptions(min_idle_time_ms=100_000),
+ )
+ == []
+ )
+
+ # non-existing consumer: no results
+ assert (
+ glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MinId(),
+ MaxId(),
+ 10,
+ StreamPendingOptions(consumer_name="non_existing_consumer"),
+ )
+ == []
+ )
+
+ # xpending when range bound is not a valid ID raises a RequestError
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending_range(
+ key, group_name, IdBound("invalid_stream_id_format"), MaxId(), 10
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending_range(
+ key, group_name, MinId(), IdBound("invalid_stream_id_format"), 10
+ )
+
+ # non-positive count returns no results
+ assert (
+ glide_sync_client.xpending_range(key, group_name, MinId(), MaxId(), -10)
+ == []
+ )
+ assert (
+ glide_sync_client.xpending_range(key, group_name, MinId(), MaxId(), 0)
+ == []
+ )
+
+ # non-positive min-idle-time values are allowed
+ result = glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MinId(),
+ MaxId(),
+ 10,
+ StreamPendingOptions(min_idle_time_ms=-100),
+ )
+ assert len(result[0]) > 0
+ result = glide_sync_client.xpending_range(
+ key,
+ group_name,
+ MinId(),
+ MaxId(),
+ 10,
+ StreamPendingOptions(min_idle_time_ms=0),
+ )
+ assert len(result[0]) > 0
+
+ # non-existing group name raises a RequestError (NOGROUP)
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending(key, "non_existing_group")
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending_range(
+ key, "non_existing_group", MinId(), MaxId(), 10
+ )
+
+ # non-existing key raises a RequestError
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending(non_existing_key, group_name)
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending_range(
+ non_existing_key, group_name, MinId(), MaxId(), 10
+ )
+
+ # key exists but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending(string_key, group_name)
+ with pytest.raises(RequestError):
+ glide_sync_client.xpending_range(
+ string_key, group_name, MinId(), MaxId(), 10
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xclaim_edge_cases_and_failures(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ string_key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+
+ # create group and consumer for the group
+ assert (
+ glide_sync_client.xgroup_create(
+ key, group_name, stream_id0, StreamGroupOptions(make_stream=True)
+ )
+ == OK
+ )
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name, consumer) is True
+ )
+
+ # Add stream entry and mark as pending:
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_0", "v1_0")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+
+ # read the entire stream with consumer and mark messages as pending
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer) == {
+ key.encode(): {stream_id1_0.encode(): [[b"f1_0", b"v1_0"]]}
+ }
+
+ # claim with invalid stream entry IDs
+ with pytest.raises(RequestError):
+ glide_sync_client.xclaim_just_id(key, group_name, consumer, 1, ["invalid"])
+
+ # claim with empty stream entry IDs returns no results
+ empty_claim = glide_sync_client.xclaim_just_id(
+ key, group_name, consumer, 1, []
+ )
+ assert len(empty_claim) == 0
+
+ claim_options = StreamClaimOptions(idle=1)
+
+ # non-existent key throws a RequestError (NOGROUP)
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.xclaim(
+ non_existing_key, group_name, consumer, 1, [stream_id1_0]
+ )
+ assert "NOGROUP" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.xclaim(
+ non_existing_key,
+ group_name,
+ consumer,
+ 1,
+ [stream_id1_0],
+ claim_options,
+ )
+ assert "NOGROUP" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.xclaim_just_id(
+ non_existing_key, group_name, consumer, 1, [stream_id1_0]
+ )
+ assert "NOGROUP" in str(e)
+
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.xclaim_just_id(
+ non_existing_key,
+ group_name,
+ consumer,
+ 1,
+ [stream_id1_0],
+ claim_options,
+ )
+ assert "NOGROUP" in str(e)
+
+ # key exists but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xclaim(
+ string_key, group_name, consumer, 1, [stream_id1_0]
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xclaim(
+ string_key, group_name, consumer, 1, [stream_id1_0], claim_options
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xclaim_just_id(
+ string_key, group_name, consumer, 1, [stream_id1_0]
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xclaim_just_id(
+ string_key, group_name, consumer, 1, [stream_id1_0], claim_options
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xautoclaim(self, glide_sync_client: GlideSync, protocol):
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ version7_or_above = False
+ else:
+ version7_or_above = True
+
+ key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer = get_random_string(10)
+ stream_id0_0 = "0-0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+ stream_id1_3 = "1-3"
+
+ # setup: add stream entries, create consumer group, add entries to Pending Entries List for group
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1", "v1"), ("f2", "v2")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_1", "v1_1")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_2", "v1_2")], StreamAddOptions(stream_id1_2)
+ )
+ == stream_id1_2.encode()
+ )
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1_3", "v1_3")], StreamAddOptions(stream_id1_3)
+ )
+ == stream_id1_3.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id0_0) == OK
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f1", b"v1"], [b"f2", b"v2"]],
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ stream_id1_2.encode(): [[b"f1_2", b"v1_2"]],
+ stream_id1_3.encode(): [[b"f1_3", b"v1_3"]],
+ }
+ }
+
+ # autoclaim the first entry only
+ result = glide_sync_client.xautoclaim(
+ key, group_name, consumer, 0, stream_id0_0, count=1
+ )
+ assert result[0] == stream_id1_1.encode()
+ assert result[1] == {stream_id1_0.encode(): [[b"f1", b"v1"], [b"f2", b"v2"]]}
+ # if using Valkey 7.0.0 or above, responses also include a list of entry IDs that were removed from the Pending
+ # Entries List because they no longer exist in the stream
+ if version7_or_above:
+ assert result[2] == []
+
+ # delete entry 1-2
+ assert glide_sync_client.xdel(key, [stream_id1_2])
+
+ # autoclaim the rest of the entries
+ result = glide_sync_client.xautoclaim(
+ key, group_name, consumer, 0, stream_id1_1
+ )
+ assert (
+ result[0] == stream_id0_0.encode()
+ ) # "0-0" is returned to indicate the entire stream was scanned.
+ assert result[1] == {
+ stream_id1_1.encode(): [[b"f1_1", b"v1_1"]],
+ stream_id1_3.encode(): [[b"f1_3", b"v1_3"]],
+ }
+ if version7_or_above:
+ assert result[2] == [stream_id1_2.encode()]
+
+ # autoclaim with JUSTID: result at index 1 does not contain fields/values of the claimed entries, only IDs
+ just_id_result = glide_sync_client.xautoclaim_just_id(
+ key, group_name, consumer, 0, stream_id0_0
+ )
+ assert just_id_result[0] == stream_id0_0.encode()
+ if version7_or_above:
+ assert just_id_result[1] == [
+ stream_id1_0.encode(),
+ stream_id1_1.encode(),
+ stream_id1_3.encode(),
+ ]
+ assert just_id_result[2] == []
+ else:
+ # in Valkey < 7.0.0, specifically for XAUTOCLAIM with JUSTID, entry IDs that were in the Pending Entries List
+ # but are no longer in the stream still show up in the response
+ assert just_id_result[1] == [
+ stream_id1_0.encode(),
+ stream_id1_1.encode(),
+ stream_id1_2.encode(),
+ stream_id1_3.encode(),
+ ]
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xautoclaim_edge_cases_and_failures(
+ self, glide_sync_client: GlideSync, protocol
+ ):
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ version7_or_above = False
+ else:
+ version7_or_above = True
+
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer = get_random_string(10)
+ stream_id0_0 = "0-0"
+ stream_id1_0 = "1-0"
+
+ # setup: add entry, create consumer group, add entry to Pending Entries List for group
+ assert (
+ glide_sync_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_0))
+ == stream_id1_0.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id0_0) == OK
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer) == {
+ key.encode(): {stream_id1_0.encode(): [[b"f1", b"v1"]]}
+ }
+
+ # passing a non-existing key is not allowed and will raise an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim(
+ non_existing_key, group_name, consumer, 0, stream_id0_0
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim_just_id(
+ non_existing_key, group_name, consumer, 0, stream_id0_0
+ )
+
+ # passing a non-existing group is not allowed and will raise an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim(
+ key, "non_existing_group", consumer, 0, stream_id0_0
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim_just_id(
+ key, "non_existing_group", consumer, 0, stream_id0_0
+ )
+
+ # non-existing consumers are created automatically
+ result = glide_sync_client.xautoclaim(
+ key, group_name, "non_existing_consumer", 0, stream_id0_0
+ )
+ assert result[0] == stream_id0_0.encode()
+ assert result[1] == {stream_id1_0.encode(): [[b"f1", b"v1"]]}
+ # if using Valkey 7.0.0 or above, responses also include a list of entry IDs that were removed from the Pending
+ # Entries List because they no longer exist in the stream
+ if version7_or_above:
+ assert result[2] == []
+
+ just_id_result = glide_sync_client.xautoclaim_just_id(
+ key, group_name, "non_existing_consumer", 0, stream_id0_0
+ )
+ assert just_id_result[0] == stream_id0_0.encode()
+ assert just_id_result[1] == [stream_id1_0.encode()]
+ if version7_or_above:
+ assert just_id_result[2] == []
+
+ # negative min_idle_time_ms values are allowed
+ result = glide_sync_client.xautoclaim(
+ key, group_name, consumer, -1, stream_id0_0
+ )
+ assert result[0] == stream_id0_0.encode()
+ assert result[1] == {stream_id1_0.encode(): [[b"f1", b"v1"]]}
+ if version7_or_above:
+ assert result[2] == []
+
+ just_id_result = glide_sync_client.xautoclaim_just_id(
+ key, group_name, consumer, -1, stream_id0_0
+ )
+ assert just_id_result[0] == stream_id0_0.encode()
+ assert just_id_result[1] == [stream_id1_0.encode()]
+ if version7_or_above:
+ assert just_id_result[2] == []
+
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim(
+ key, group_name, consumer, 0, "invalid_stream_id"
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim_just_id(
+ key, group_name, consumer, 0, "invalid_stream_id"
+ )
+
+ # no stream entries to claim above the given start value
+ result = glide_sync_client.xautoclaim(key, group_name, consumer, 0, "99-99")
+ assert result[0] == stream_id0_0.encode()
+ assert result[1] == {}
+ if version7_or_above:
+ assert result[2] == []
+
+ just_id_result = glide_sync_client.xautoclaim_just_id(
+ key, group_name, consumer, 0, "99-99"
+ )
+ assert just_id_result[0] == stream_id0_0.encode()
+ assert just_id_result[1] == []
+ if version7_or_above:
+ assert just_id_result[2] == []
+
+ # invalid arg - count must be positive
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim(
+ key, group_name, consumer, 0, stream_id0_0, count=0
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim_just_id(
+ key, group_name, consumer, 0, stream_id0_0, count=0
+ )
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim(
+ string_key, group_name, consumer, 0, stream_id0_0
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.xautoclaim_just_id(
+ string_key, group_name, consumer, 0, stream_id0_0
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xinfo_groups_xinfo_consumers(
+ self, glide_sync_client: GlideSync, protocol
+ ):
+ key = get_random_string(10)
+ group_name1 = get_random_string(10)
+ group_name2 = get_random_string(10)
+ consumer1 = get_random_string(10)
+ consumer2 = get_random_string(10)
+ stream_id0_0 = "0-0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+ stream_id1_3 = "1-3"
+
+ # setup: add 3 entries to stream, create consumer group and consumer1, read 1 entry from stream with consumer1
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1", "v1"), ("f2", "v2")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(key, [("f3", "v3")], StreamAddOptions(stream_id1_1))
+ == stream_id1_1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(key, [("f4", "v4")], StreamAddOptions(stream_id1_2))
+ == stream_id1_2.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name1, stream_id0_0) == OK
+ assert glide_sync_client.xreadgroup(
+ {key: ">"}, group_name1, consumer1, StreamReadGroupOptions(count=1)
+ ) == {key.encode(): {stream_id1_0.encode(): [[b"f1", b"v1"], [b"f2", b"v2"]]}}
+
+ # sleep to ensure the idle time value and inactive time value returned by xinfo_consumers is > 0
+ time.sleep(2)
+ consumers_result = glide_sync_client.xinfo_consumers(key, group_name1)
+ assert len(consumers_result) == 1
+ consumer1_info = consumers_result[0]
+ assert consumer1_info.get(b"name") == consumer1.encode()
+ assert consumer1_info.get(b"pending") == 1
+ assert cast(int, consumer1_info.get(b"idle")) > 0
+ if not check_if_server_version_lt(glide_sync_client, "7.2.0"):
+ assert (
+ cast(int, consumer1_info.get(b"inactive"))
+ > 0 # "inactive" was added in Valkey 7.2.0
+ )
+
+ # create consumer2 and read the rest of the entries with it
+ assert (
+ glide_sync_client.xgroup_create_consumer(key, group_name1, consumer2)
+ is True
+ )
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name1, consumer2) == {
+ key.encode(): {
+ stream_id1_1.encode(): [[b"f3", b"v3"]],
+ stream_id1_2.encode(): [[b"f4", b"v4"]],
+ }
+ }
+
+ # verify that xinfo_consumers contains info for 2 consumers now
+ # test with byte string args
+ consumers_result = glide_sync_client.xinfo_consumers(
+ key.encode(), group_name1.encode()
+ )
+ assert len(consumers_result) == 2
+
+ # add one more entry
+ assert (
+ glide_sync_client.xadd(key, [("f5", "v5")], StreamAddOptions(stream_id1_3))
+ == stream_id1_3.encode()
+ )
+
+ groups = glide_sync_client.xinfo_groups(key)
+ assert len(groups) == 1
+ group1_info = groups[0]
+ assert group1_info.get(b"name") == group_name1.encode()
+ assert group1_info.get(b"consumers") == 2
+ assert group1_info.get(b"pending") == 3
+ assert group1_info.get(b"last-delivered-id") == stream_id1_2.encode()
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert (
+ group1_info.get(b"entries-read")
+ == 3 # we have read stream entries 1-0, 1-1, and 1-2
+ )
+ assert (
+ group1_info.get(b"lag")
+ == 1 # we still have not read one entry in the stream, entry 1-3
+ )
+
+ # verify xgroup_set_id affects the returned value from xinfo_groups
+ assert glide_sync_client.xgroup_set_id(key, group_name1, stream_id1_1) == OK
+ # test with byte string arg
+ groups = glide_sync_client.xinfo_groups(key.encode())
+ assert len(groups) == 1
+ group1_info = groups[0]
+ assert group1_info.get(b"name") == group_name1.encode()
+ assert group1_info.get(b"consumers") == 2
+ assert group1_info.get(b"pending") == 3
+ assert group1_info.get(b"last-delivered-id") == stream_id1_1.encode()
+ # entries-read and lag were added to the result in 7.0.0
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert (
+ group1_info.get(b"entries-read")
+ is None # gets set to None when we change the last delivered ID
+ )
+ assert (
+ group1_info.get(b"lag")
+ is None # gets set to None when we change the last delivered ID
+ )
+
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ # verify xgroup_set_id with entries_read affects the returned value from xinfo_groups
+ assert (
+ glide_sync_client.xgroup_set_id(
+ key, group_name1, stream_id1_1, entries_read=1
+ )
+ == OK
+ )
+ groups = glide_sync_client.xinfo_groups(key)
+ assert len(groups) == 1
+ group1_info = groups[0]
+ assert group1_info.get(b"name") == group_name1.encode()
+ assert group1_info.get(b"consumers") == 2
+ assert group1_info.get(b"pending") == 3
+ assert group1_info.get(b"last-delivered-id") == stream_id1_1.encode()
+ assert group1_info.get(b"entries-read") == 1
+ assert (
+ group1_info.get(b"lag")
+ == 3 # lag is calculated as number of stream entries minus entries-read
+ )
+
+ # add one more consumer group
+ assert glide_sync_client.xgroup_create(key, group_name2, stream_id0_0) == OK
+
+ # verify that xinfo_groups contains info for 2 consumer groups now
+ groups = glide_sync_client.xinfo_groups(key)
+ assert len(groups) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xinfo_groups_xinfo_consumers_edge_cases_and_failures(
+ self, glide_sync_client: GlideSync, protocol
+ ):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ group_name = get_random_string(10)
+ stream_id1_0 = "1-0"
+
+ # passing a non-existing key raises an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_groups(non_existing_key)
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_consumers(non_existing_key, group_name)
+
+ assert (
+ glide_sync_client.xadd(
+ key, [("f1", "v1"), ("f2", "v2")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+
+ # passing a non-existing group raises an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_consumers(key, "non_existing_group")
+
+ # no groups exist yet
+ assert glide_sync_client.xinfo_groups(key) == []
+
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id1_0) == OK
+ # no consumers exist yet
+ assert glide_sync_client.xinfo_consumers(key, group_name) == []
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_groups(string_key)
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_consumers(string_key, group_name)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xinfo_stream(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol
+ ):
+ key = get_random_string(10)
+ group_name = get_random_string(10)
+ consumer = get_random_string(10)
+ stream_id0_0 = "0-0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+
+ # setup: add stream entry, create consumer group and consumer, read from stream with consumer
+ assert (
+ glide_sync_client.xadd(
+ key, [("a", "b"), ("c", "d")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id0_0) == OK
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer) == {
+ key.encode(): {stream_id1_0.encode(): [[b"a", b"b"], [b"c", b"d"]]}
+ }
+
+ result = glide_sync_client.xinfo_stream(key)
+ assert result.get(b"length") == 1
+ expected_first_entry = [stream_id1_0.encode(), [b"a", b"b", b"c", b"d"]]
+ assert result.get(b"first-entry") == expected_first_entry
+
+ # only one entry exists, so first and last entry should be the same
+ assert result.get(b"last-entry") == expected_first_entry
+
+ # call XINFO STREAM with a byte string arg
+ result2 = glide_sync_client.xinfo_stream(key.encode())
+ assert result2 == result
+
+ # add one more entry
+ assert (
+ glide_sync_client.xadd(
+ key, [("foo", "bar")], StreamAddOptions(stream_id1_1)
+ )
+ == stream_id1_1.encode()
+ )
+
+ result_full = glide_sync_client.xinfo_stream_full(key, count=1)
+ assert result_full.get(b"length") == 2
+ entries = cast(list, result_full.get(b"entries"))
+ # only the first entry will be returned since we passed count=1
+ assert len(entries) == 1
+ assert entries[0] == expected_first_entry
+
+ groups = cast(list, result_full.get(b"groups"))
+ assert len(groups) == 1
+ group_info = groups[0]
+ assert group_info.get(b"name") == group_name.encode()
+ pending = group_info.get(b"pending")
+ assert len(pending) == 1
+ assert stream_id1_0.encode() in pending[0]
+
+ consumers = group_info.get(b"consumers")
+ assert len(consumers) == 1
+ consumer_info = consumers[0]
+ assert consumer_info.get(b"name") == consumer.encode()
+ consumer_pending = consumer_info.get(b"pending")
+ assert len(consumer_pending) == 1
+ assert stream_id1_0.encode() in consumer_pending[0]
+
+ # call XINFO STREAM FULL with byte arg
+ result_full2 = glide_sync_client.xinfo_stream_full(key.encode())
+ # 2 entries should be returned, since we didn't pass the COUNT arg this time
+ assert len(cast(list, result_full2.get(b"entries"))) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xinfo_stream_edge_cases_and_failures(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol
+ ):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ stream_id1_0 = "1-0"
+
+ # setup: create empty stream
+ assert (
+ glide_sync_client.xadd(
+ key, [("field", "value")], StreamAddOptions(stream_id1_0)
+ )
+ == stream_id1_0.encode()
+ )
+ assert glide_sync_client.xdel(key, [stream_id1_0]) == 1
+
+ # XINFO STREAM called against empty stream
+ result = glide_sync_client.xinfo_stream(key)
+ assert result.get(b"length") == 0
+ assert result.get(b"first-entry") is None
+ assert result.get(b"last-entry") is None
+
+ # XINFO STREAM FULL called against empty stream. Negative count values are ignored.
+ result_full = glide_sync_client.xinfo_stream_full(key, count=-3)
+ assert result_full.get(b"length") == 0
+ assert result_full.get(b"entries") == []
+ assert result_full.get(b"groups") == []
+
+ # calling XINFO STREAM with a non-existing key raises an error
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_stream(non_existing_key)
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_stream_full(non_existing_key)
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo")
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_stream(string_key)
+ with pytest.raises(RequestError):
+ glide_sync_client.xinfo_stream_full(string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_xgroup_set_id(
+ self, glide_sync_client: GlideSync, cluster_mode, protocol, request
+ ):
+ key = f"{{testKey}}:{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:{get_random_string(10)}"
+ string_key = f"{{testKey}}:{get_random_string(10)}"
+ group_name = get_random_string(10)
+ consumer_name = get_random_string(10)
+ stream_id0 = "0"
+ stream_id1_0 = "1-0"
+ stream_id1_1 = "1-1"
+ stream_id1_2 = "1-2"
+
+ # setup: create stream with 3 entries, create consumer group, read entries to add them to the Pending Entries
+ # List
+ assert (
+ glide_sync_client.xadd(key, [("f0", "v0")], StreamAddOptions(stream_id1_0))
+ == stream_id1_0.encode()
+ )
+ assert (
+ glide_sync_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_1))
+ == stream_id1_1.encode()
+ )
+ assert (
+ glide_sync_client.xadd(key, [("f2", "v2")], StreamAddOptions(stream_id1_2))
+ == stream_id1_2.encode()
+ )
+ assert glide_sync_client.xgroup_create(key, group_name, stream_id0) == OK
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) == {
+ key.encode(): {
+ stream_id1_0.encode(): [[b"f0", b"v0"]],
+ stream_id1_1.encode(): [[b"f1", b"v1"]],
+ stream_id1_2.encode(): [[b"f2", b"v2"]],
+ }
+ }
+ # sanity check: xreadgroup should not return more entries since they're all already in the Pending Entries List
+ assert (
+ glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) is None
+ )
+
+ # reset the last delivered ID for the consumer group to "1-1"
+ # ENTRIESREAD is only supported in Valkey version 7.0.0 and above
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert glide_sync_client.xgroup_set_id(key, group_name, stream_id1_1) == OK
+ else:
+ assert (
+ glide_sync_client.xgroup_set_id(
+ key, group_name, stream_id1_1, entries_read=0
+ )
+ == OK
+ )
+
+ # xreadgroup should only return entry 1-2 since we reset the last delivered ID to 1-1
+ assert glide_sync_client.xreadgroup({key: ">"}, group_name, consumer_name) == {
+ key.encode(): {
+ stream_id1_2.encode(): [[b"f2", b"v2"]],
+ }
+ }
+
+ # an error is raised if XGROUP SETID is called with a non-existing key
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_set_id(non_existing_key, group_name, stream_id0)
+
+ # an error is raised if XGROUP SETID is called with a non-existing group
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_set_id(key, "non_existing_group", stream_id0)
+
+ # setting the ID to a non-existing ID is allowed
+ assert glide_sync_client.xgroup_set_id(key, group_name, "99-99") == OK
+
+ # key exists, but it is not a stream
+ assert glide_sync_client.set(string_key, "foo") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.xgroup_set_id(string_key, group_name, stream_id0)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_pfadd(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ assert glide_sync_client.pfadd(key, []) == 1
+ assert glide_sync_client.pfadd(key, ["one", "two"]) == 1
+ assert glide_sync_client.pfadd(key, ["two"]) == 0
+ assert glide_sync_client.pfadd(key, []) == 0
+
+ assert glide_sync_client.set("foo", "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.pfadd("foo", [])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_pfcount(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ key3 = f"{{testKey}}:3-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ assert glide_sync_client.pfadd(key1, ["a", "b", "c"]) == 1
+ assert glide_sync_client.pfadd(key2, ["b", "c", "d"]) == 1
+ assert glide_sync_client.pfcount([key1]) == 3
+ assert glide_sync_client.pfcount([key2]) == 3
+ assert glide_sync_client.pfcount([key1, key2]) == 4
+ assert glide_sync_client.pfcount([key1, key2, non_existing_key]) == 4
+ # empty HyperLogLog data set
+ assert glide_sync_client.pfadd(key3, []) == 1
+ assert glide_sync_client.pfcount([key3]) == 0
+
+ # incorrect argument - key list cannot be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.pfcount([])
+
+ # key exists, but it is not a HyperLogLog
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.pfcount([string_key])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_pfmerge(self, glide_sync_client: GlideSync):
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ key3 = f"{{testKey}}:3-{get_random_string(10)}"
+ string_key = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key = f"{{testKey}}:5-{get_random_string(10)}"
+
+ assert glide_sync_client.pfadd(key1, ["a", "b", "c"]) == 1
+ assert glide_sync_client.pfadd(key2, ["b", "c", "d"]) == 1
+
+ # merge into new HyperLogLog data set
+ assert glide_sync_client.pfmerge(key3, [key1, key2]) == OK
+ assert glide_sync_client.pfcount([key3]) == 4
+
+ # merge into existing HyperLogLog data set
+ assert glide_sync_client.pfmerge(key1, [key2]) == OK
+ assert glide_sync_client.pfcount([key1]) == 4
+
+ # non-existing source key
+ assert glide_sync_client.pfmerge(key2, [key1, non_existing_key]) == OK
+ assert glide_sync_client.pfcount([key2]) == 4
+
+ # empty source key list
+ assert glide_sync_client.pfmerge(key1, []) == OK
+ assert glide_sync_client.pfcount([key1]) == 4
+
+ # source key exists, but it is not a HyperLogLog
+ assert glide_sync_client.set(string_key, "foo")
+ with pytest.raises(RequestError):
+ assert glide_sync_client.pfmerge(key3, [string_key])
+
+ # destination key exists, but it is not a HyperLogLog
+ with pytest.raises(RequestError):
+ assert glide_sync_client.pfmerge(string_key, [key3])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bitcount(self, glide_sync_client: GlideSync):
+ key1 = get_random_string(10)
+ set_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ value = "foobar"
+
+ assert glide_sync_client.set(key1, value) == OK
+ assert glide_sync_client.bitcount(key1) == 26
+ assert glide_sync_client.bitcount(key1, OffsetOptions(1, 1)) == 6
+ assert glide_sync_client.bitcount(key1, OffsetOptions(0, -5)) == 10
+ assert glide_sync_client.bitcount(non_existing_key, OffsetOptions(5, 30)) == 0
+ assert glide_sync_client.bitcount(non_existing_key) == 0
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, [value]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(set_key)
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(set_key, OffsetOptions(1, 1))
+
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ # exception thrown because BIT and BYTE options were only introduced in 7.0.0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(
+ key1, OffsetOptions(2, 5, BitmapIndexType.BYTE)
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(
+ key1, OffsetOptions(2, 5, BitmapIndexType.BIT)
+ )
+ else:
+ assert (
+ glide_sync_client.bitcount(
+ key1, OffsetOptions(2, 5, BitmapIndexType.BYTE)
+ )
+ == 16
+ )
+ assert (
+ glide_sync_client.bitcount(
+ key1, OffsetOptions(5, 30, BitmapIndexType.BIT)
+ )
+ == 17
+ )
+ assert (
+ glide_sync_client.bitcount(
+ key1, OffsetOptions(5, -5, BitmapIndexType.BIT)
+ )
+ == 23
+ )
+ assert (
+ glide_sync_client.bitcount(
+ non_existing_key, OffsetOptions(5, 30, BitmapIndexType.BIT)
+ )
+ == 0
+ )
+
+ # key exists but it is not a string
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(
+ set_key, OffsetOptions(1, 1, BitmapIndexType.BIT)
+ )
+
+ if check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ # exception thrown because the optional end argument was only introduced in 8.0.0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(
+ key1,
+ OffsetOptions(
+ 2,
+ ),
+ )
+ else:
+ assert glide_sync_client.bitcount(key1, OffsetOptions(0)) == 26
+ assert glide_sync_client.bitcount(key1, OffsetOptions(5)) == 4
+ assert glide_sync_client.bitcount(key1, OffsetOptions(80)) == 0
+ assert glide_sync_client.bitcount(non_existing_key, OffsetOptions(5)) == 0
+
+ # key exists but it is not a string
+ with pytest.raises(RequestError):
+ glide_sync_client.bitcount(set_key, OffsetOptions(1))
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_setbit(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ set_key = get_random_string(10)
+
+ assert glide_sync_client.setbit(key, 0, 1) == 0
+ assert glide_sync_client.setbit(key, 0, 0) == 1
+
+ # invalid argument - offset can't be negative
+ with pytest.raises(RequestError):
+ assert glide_sync_client.setbit(key, -1, 0) == 1
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, ["foo"]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.setbit(set_key, 0, 0)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_getbit(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ set_key = get_random_string(10)
+ value = "foobar"
+
+ assert glide_sync_client.set(key, value) == OK
+ assert glide_sync_client.getbit(key, 1) == 1
+ # When offset is beyond the string length, the string is assumed to be a contiguous space with 0 bits.
+ assert glide_sync_client.getbit(key, 1000) == 0
+ # When key does not exist it is assumed to be an empty string, so offset is always out of range and the value is
+ # also assumed to be a contiguous space with 0 bits.
+ assert glide_sync_client.getbit(non_existing_key, 1) == 0
+
+ # invalid argument - offset can't be negative
+ with pytest.raises(RequestError):
+ assert glide_sync_client.getbit(key, -1) == 1
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, ["foo"]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.getbit(set_key, 0)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bitpos(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ set_key = get_random_string(10)
+ value = (
+ "?f0obar" # 00111111 01100110 00110000 01101111 01100010 01100001 01110010
+ )
+
+ assert glide_sync_client.set(key, value) == OK
+ assert glide_sync_client.bitpos(key, 0) == 0
+ assert glide_sync_client.bitpos(key, 1) == 2
+ assert glide_sync_client.bitpos(key, 1, OffsetOptions(1)) == 9
+ assert glide_sync_client.bitpos(key, 0, OffsetOptions(3, 5)) == 24
+
+ # `BITPOS` returns -1 for non-existing strings
+ assert glide_sync_client.bitpos(non_existing_key, 1) == -1
+ assert glide_sync_client.bitpos(non_existing_key, 1, OffsetOptions(3, 5)) == -1
+
+ # invalid argument - bit value must be 0 or 1
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(key, 2)
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(key, 2, OffsetOptions(3, 5))
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, [value]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(set_key, 1)
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(set_key, 1, OffsetOptions(1, -1))
+
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ # error thrown because BIT and BYTE options were implemented after 7.0.0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(
+ key, 1, OffsetOptions(1, -1, BitmapIndexType.BYTE)
+ )
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(
+ key, 1, OffsetOptions(1, -1, BitmapIndexType.BIT)
+ )
+ else:
+ assert (
+ glide_sync_client.bitpos(
+ key, 0, OffsetOptions(3, 5, BitmapIndexType.BYTE)
+ )
+ == 24
+ )
+ assert (
+ glide_sync_client.bitpos(
+ key, 1, OffsetOptions(43, -2, BitmapIndexType.BIT)
+ )
+ == 47
+ )
+ assert (
+ glide_sync_client.bitpos(
+ non_existing_key, 1, OffsetOptions(3, 5, BitmapIndexType.BYTE)
+ )
+ == -1
+ )
+ assert (
+ glide_sync_client.bitpos(
+ non_existing_key, 1, OffsetOptions(3, 5, BitmapIndexType.BIT)
+ )
+ == -1
+ )
+
+ # key exists, but it is not a string
+ with pytest.raises(RequestError):
+ glide_sync_client.bitpos(
+ set_key, 1, OffsetOptions(1, -1, BitmapIndexType.BIT)
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bitop(self, glide_sync_client: GlideSync):
+ """BITOP AND/OR/XOR/NOT across hash-tagged keys, including missing and wrong-type sources."""
+ # All keys share the {testKey} hash tag so they land in the same slot in cluster mode.
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ keys: List[TEncodable] = [key1, key2]
+ destination: TEncodable = f"{{testKey}}:3-{get_random_string(10)}"
+ non_existing_key1 = f"{{testKey}}:4-{get_random_string(10)}"
+ non_existing_key2 = f"{{testKey}}:5-{get_random_string(10)}"
+ non_existing_keys: List[TEncodable] = [non_existing_key1, non_existing_key2]
+ set_key = f"{{testKey}}:6-{get_random_string(10)}"
+ value1 = "foobar"
+ value2 = "abcdef"
+
+ # "foobar" AND/OR "abcdef" produce known 6-byte results; BITOP returns the result length.
+ assert glide_sync_client.set(key1, value1) == OK
+ assert glide_sync_client.set(key2, value2) == OK
+ assert glide_sync_client.bitop(BitwiseOperation.AND, destination, keys) == 6
+ assert glide_sync_client.get(destination) == b"`bc`ab"
+ assert glide_sync_client.bitop(BitwiseOperation.OR, destination, keys) == 6
+ assert glide_sync_client.get(destination) == b"goofev"
+
+ # reset values for simplicity of results in XOR
+ assert glide_sync_client.set(key1, "a") == OK
+ assert glide_sync_client.set(key2, "b") == OK
+ assert glide_sync_client.bitop(BitwiseOperation.XOR, destination, keys) == 1
+ assert glide_sync_client.get(destination) == "\u0003".encode()
+
+ # test single source key
+ assert glide_sync_client.bitop(BitwiseOperation.AND, destination, [key1]) == 1
+ assert glide_sync_client.get(destination) == b"a"
+ assert glide_sync_client.bitop(BitwiseOperation.OR, destination, [key1]) == 1
+ assert glide_sync_client.get(destination) == b"a"
+ assert glide_sync_client.bitop(BitwiseOperation.XOR, destination, [key1]) == 1
+ assert glide_sync_client.get(destination) == b"a"
+ assert glide_sync_client.bitop(BitwiseOperation.NOT, destination, [key1]) == 1
+ # currently, attempting to get the value from destination after the above NOT incorrectly raises an error
+ # TODO: update with a GET call once fix is implemented for https://github.com/valkey-io/valkey-glide/issues/1447
+
+ # "a" is 0x61; setting bit 0 gives 0xE1, whose bitwise complement is 0x1E.
+ assert glide_sync_client.setbit(key1, 0, 1) == 0
+ assert glide_sync_client.bitop(BitwiseOperation.NOT, destination, [key1]) == 1
+ assert glide_sync_client.get(destination) == "\u001e".encode()
+
+ # stores None when all keys hold empty strings
+ assert (
+ glide_sync_client.bitop(
+ BitwiseOperation.AND, destination, non_existing_keys
+ )
+ == 0
+ )
+ assert glide_sync_client.get(destination) is None
+ assert (
+ glide_sync_client.bitop(
+ BitwiseOperation.OR, destination, non_existing_keys
+ )
+ == 0
+ )
+ assert glide_sync_client.get(destination) is None
+ assert (
+ glide_sync_client.bitop(
+ BitwiseOperation.XOR, destination, non_existing_keys
+ )
+ == 0
+ )
+ assert glide_sync_client.get(destination) is None
+ assert (
+ glide_sync_client.bitop(
+ BitwiseOperation.NOT, destination, [non_existing_key1]
+ )
+ == 0
+ )
+ assert glide_sync_client.get(destination) is None
+
+ # invalid argument - source key list cannot be empty
+ with pytest.raises(RequestError):
+ glide_sync_client.bitop(BitwiseOperation.OR, destination, [])
+
+ # invalid arguments - NOT cannot be passed more than 1 key
+ with pytest.raises(RequestError):
+ glide_sync_client.bitop(BitwiseOperation.NOT, destination, [key1, key2])
+
+ assert glide_sync_client.sadd(set_key, [value1]) == 1
+ # invalid argument - source key has the wrong type
+ with pytest.raises(RequestError):
+ glide_sync_client.bitop(BitwiseOperation.AND, destination, [set_key])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bitfield(self, glide_sync_client: GlideSync):
+ """BITFIELD SET/INCRBY/GET subcommands, OVERFLOW behavior, and argument validation."""
+ key1 = get_random_string(10)
+ key2 = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ set_key = get_random_string(10)
+ foobar = "foobar"
+ u2 = UnsignedEncoding(2)
+ u7 = UnsignedEncoding(7)
+ i3 = SignedEncoding(3)
+ i8 = SignedEncoding(8)
+ offset1 = BitOffset(1)
+ offset5 = BitOffset(5)
+ offset_multiplier4 = BitOffsetMultiplier(4)
+ offset_multiplier8 = BitOffsetMultiplier(8)
+ # Subcommands reused by the OVERFLOW tests below; u2 at offset 1 wraps for -10.
+ overflow_set = BitFieldSet(u2, offset1, -10)
+ overflow_get = BitFieldGet(u2, offset1)
+
+ # binary value: 01100110 01101111 01101111 01100010 01100001 01110010
+ assert glide_sync_client.set(key1, foobar) == OK
+
+ # SET tests: each SET returns the previous field value, each GET the current one.
+ assert glide_sync_client.bitfield(
+ key1,
+ [
+ # binary value becomes: 0(10)00110 01101111 01101111 01100010 01100001 01110010
+ BitFieldSet(u2, offset1, 2),
+ # binary value becomes: 01000(011) 01101111 01101111 01100010 01100001 01110010
+ BitFieldSet(i3, offset5, 3),
+ # binary value becomes: 01000011 01101111 01101111 0110(0010 010)00001 01110010
+ BitFieldSet(u7, offset_multiplier4, 18),
+ # addressing with SET or INCRBY bits outside the current string length will enlarge the string,
+ # zero-padding it, as needed, for the minimal length needed, according to the most far bit touched.
+ #
+ # binary value becomes:
+ # 01000011 01101111 01101111 01100010 01000001 01110010 00000000 00000000 (00010100)
+ BitFieldSet(i8, offset_multiplier8, 20),
+ BitFieldGet(u2, offset1),
+ BitFieldGet(i3, offset5),
+ BitFieldGet(u7, offset_multiplier4),
+ BitFieldGet(i8, offset_multiplier8),
+ ],
+ ) == [3, -2, 19, 0, 2, 3, 18, 20]
+
+ # INCRBY tests: each INCRBY returns the new field value.
+ assert glide_sync_client.bitfield(
+ key1,
+ [
+ # binary value becomes:
+ # 0(11)00011 01101111 01101111 01100010 01000001 01110010 00000000 00000000 00010100
+ BitFieldIncrBy(u2, offset1, 1),
+ # binary value becomes:
+ # 01100(101) 01101111 01101111 01100010 01000001 01110010 00000000 00000000 00010100
+ BitFieldIncrBy(i3, offset5, 2),
+ # binary value becomes:
+ # 01100101 01101111 01101111 0110(0001 111)00001 01110010 00000000 00000000 00010100
+ BitFieldIncrBy(u7, offset_multiplier4, -3),
+ # binary value becomes:
+ # 01100101 01101111 01101111 01100001 11100001 01110010 00000000 00000000 (00011110)
+ BitFieldIncrBy(i8, offset_multiplier8, 10),
+ ],
+ ) == [3, -3, 15, 30]
+
+ # OVERFLOW WRAP is used by default if no OVERFLOW is specified
+ assert glide_sync_client.bitfield(
+ key2,
+ [
+ overflow_set,
+ BitFieldOverflow(BitOverflowControl.WRAP),
+ overflow_set,
+ overflow_get,
+ ],
+ ) == [0, 2, 2]
+
+ # OVERFLOW affects only SET or INCRBY after OVERFLOW subcommand
+ assert glide_sync_client.bitfield(
+ key2,
+ [
+ overflow_set,
+ BitFieldOverflow(BitOverflowControl.SAT),
+ overflow_set,
+ overflow_get,
+ BitFieldOverflow(BitOverflowControl.FAIL),
+ overflow_set,
+ ],
+ ) == [2, 2, 3, None]
+
+ # if the key doesn't exist, the operation is performed as though the missing value was a string with all bits
+ # set to 0.
+ assert glide_sync_client.bitfield(
+ non_existing_key, [BitFieldSet(UnsignedEncoding(2), BitOffset(3), 2)]
+ ) == [0]
+
+ # empty subcommands argument returns an empty list
+ assert glide_sync_client.bitfield(key1, []) == []
+
+ # invalid argument - offset must be >= 0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield(
+ key1, [BitFieldSet(UnsignedEncoding(5), BitOffset(-1), 1)]
+ )
+
+ # invalid argument - encoding size must be > 0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield(
+ key1, [BitFieldSet(UnsignedEncoding(0), BitOffset(1), 1)]
+ )
+
+ # invalid argument - unsigned encoding size must be < 64
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield(
+ key1, [BitFieldSet(UnsignedEncoding(64), BitOffset(1), 1)]
+ )
+
+ # invalid argument - signed encoding size must be < 65
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield(
+ key1, [BitFieldSet(SignedEncoding(65), BitOffset(1), 1)]
+ )
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, [foobar]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield(
+ set_key, [BitFieldSet(SignedEncoding(3), BitOffset(1), 2)]
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_bitfield_read_only(self, glide_sync_client: GlideSync):
+ min_version = "6.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ set_key = get_random_string(10)
+ foobar = "foobar"
+ unsigned_offset_get = BitFieldGet(UnsignedEncoding(2), BitOffset(1))
+
+ # binary value: 01100110 01101111 01101111 01100010 01100001 01110010
+ assert glide_sync_client.set(key, foobar) == OK
+ assert glide_sync_client.bitfield_read_only(
+ key,
+ [
+ # Get value in: 0(11)00110 01101111 01101111 01100010 01100001 01110010 00010100
+ unsigned_offset_get,
+ # Get value in: 01100(110) 01101111 01101111 01100010 01100001 01110010 00010100
+ BitFieldGet(SignedEncoding(3), BitOffset(5)),
+ # Get value in: 01100110 01101111 01101(111 0110)0010 01100001 01110010 00010100
+ BitFieldGet(UnsignedEncoding(7), BitOffsetMultiplier(3)),
+ # Get value in: 01100110 01101111 (01101111) 01100010 01100001 01110010 00010100
+ BitFieldGet(SignedEncoding(8), BitOffsetMultiplier(2)),
+ ],
+ ) == [3, -2, 118, 111]
+ # offset is greater than current length of string: the operation is performed like the missing part all consists
+ # of bits set to 0.
+ assert glide_sync_client.bitfield_read_only(
+ key, [BitFieldGet(UnsignedEncoding(3), BitOffset(100))]
+ ) == [0]
+ # similarly, if the key doesn't exist, the operation is performed as though the missing value was a string with
+ # all bits set to 0.
+ assert glide_sync_client.bitfield_read_only(
+ non_existing_key, [unsigned_offset_get]
+ ) == [0]
+
+ # empty subcommands argument returns an empty list
+ assert glide_sync_client.bitfield_read_only(key, []) == []
+
+ # invalid argument - offset must be >= 0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield_read_only(
+ key, [BitFieldGet(UnsignedEncoding(5), BitOffset(-1))]
+ )
+
+ # invalid argument - encoding size must be > 0
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield_read_only(
+ key, [BitFieldGet(UnsignedEncoding(0), BitOffset(1))]
+ )
+
+ # invalid argument - unsigned encoding size must be < 64
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield_read_only(
+ key, [BitFieldGet(UnsignedEncoding(64), BitOffset(1))]
+ )
+
+ # invalid argument - signed encoding size must be < 65
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield_read_only(
+ key, [BitFieldGet(SignedEncoding(65), BitOffset(1))]
+ )
+
+ # key exists, but it is not a string
+ assert glide_sync_client.sadd(set_key, [foobar]) == 1
+ with pytest.raises(RequestError):
+ glide_sync_client.bitfield_read_only(set_key, [unsigned_offset_get])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_object_encoding(self, glide_sync_client: GlideSync):
+ """OBJECT ENCODING for every data type, including version-dependent encodings."""
+ string_key = get_random_string(10)
+ list_key = get_random_string(10)
+ hashtable_key = get_random_string(10)
+ intset_key = get_random_string(10)
+ set_listpack_key = get_random_string(10)
+ hash_hashtable_key = get_random_string(10)
+ hash_listpack_key = get_random_string(10)
+ skiplist_key = get_random_string(10)
+ zset_listpack_key = get_random_string(10)
+ stream_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+
+ # Missing keys have no encoding.
+ assert glide_sync_client.object_encoding(non_existing_key) is None
+
+ # Long strings are stored "raw"; numeric strings as "int"; short strings as "embstr".
+ assert glide_sync_client.set(
+ string_key, "a really loooooooooooooooooooooooooooooooooooooooong value"
+ )
+ assert glide_sync_client.object_encoding(string_key) == "raw".encode()
+
+ assert glide_sync_client.set(string_key, "2") == OK
+ assert glide_sync_client.object_encoding(string_key) == "int".encode()
+
+ assert glide_sync_client.set(string_key, "value") == OK
+ assert glide_sync_client.object_encoding(string_key) == "embstr".encode()
+
+ # Small-list encoding changed from quicklist to listpack in server 7.2.
+ assert glide_sync_client.lpush(list_key, ["1"]) == 1
+ if check_if_server_version_lt(glide_sync_client, "7.2.0"):
+ assert glide_sync_client.object_encoding(list_key) == "quicklist".encode()
+ else:
+ assert glide_sync_client.object_encoding(list_key) == "listpack".encode()
+
+ # The default value of set-max-intset-entries is 512
+ for i in range(0, 513):
+ assert glide_sync_client.sadd(hashtable_key, [str(i)]) == 1
+ assert glide_sync_client.object_encoding(hashtable_key) == "hashtable".encode()
+
+ assert glide_sync_client.sadd(intset_key, ["1"]) == 1
+ assert glide_sync_client.object_encoding(intset_key) == "intset".encode()
+
+ # Small sets of non-integer members: hashtable before 7.2, listpack from 7.2 on.
+ assert glide_sync_client.sadd(set_listpack_key, ["foo"]) == 1
+ if check_if_server_version_lt(glide_sync_client, "7.2.0"):
+ assert (
+ glide_sync_client.object_encoding(set_listpack_key)
+ == "hashtable".encode()
+ )
+ else:
+ assert (
+ glide_sync_client.object_encoding(set_listpack_key)
+ == "listpack".encode()
+ )
+
+ # The default value of hash-max-listpack-entries is 512
+ for i in range(0, 513):
+ assert glide_sync_client.hset(hash_hashtable_key, {str(i): "2"}) == 1
+ assert (
+ glide_sync_client.object_encoding(hash_hashtable_key)
+ == "hashtable".encode()
+ )
+
+ # Small-hash encoding was renamed from ziplist to listpack in server 7.0.
+ assert glide_sync_client.hset(hash_listpack_key, {"1": "2"}) == 1
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert (
+ glide_sync_client.object_encoding(hash_listpack_key)
+ == "ziplist".encode()
+ )
+ else:
+ assert (
+ glide_sync_client.object_encoding(hash_listpack_key)
+ == "listpack".encode()
+ )
+
+ # The default value of zset-max-listpack-entries is 128
+ for i in range(0, 129):
+ assert glide_sync_client.zadd(skiplist_key, {str(i): 2.0}) == 1
+ assert glide_sync_client.object_encoding(skiplist_key) == "skiplist".encode()
+
+ # Small-zset encoding was renamed from ziplist to listpack in server 7.0.
+ assert glide_sync_client.zadd(zset_listpack_key, {"1": 2.0}) == 1
+ if check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ assert (
+ glide_sync_client.object_encoding(zset_listpack_key)
+ == "ziplist".encode()
+ )
+ else:
+ assert (
+ glide_sync_client.object_encoding(zset_listpack_key)
+ == "listpack".encode()
+ )
+
+ # Streams always report the "stream" encoding.
+ assert glide_sync_client.xadd(stream_key, [("field", "value")]) is not None
+ assert glide_sync_client.object_encoding(stream_key) == "stream".encode()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_object_freq(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ maxmemory_policy_key = "maxmemory-policy"
+ config = glide_sync_client.config_get([maxmemory_policy_key])
+ config_decoded = cast(dict, convert_bytes_to_string_object(config))
+ assert config_decoded is not None
+ maxmemory_policy = cast(str, config_decoded.get(maxmemory_policy_key))
+
+ try:
+ assert (
+ glide_sync_client.config_set({maxmemory_policy_key: "allkeys-lfu"})
+ == OK
+ )
+ assert glide_sync_client.object_freq(non_existing_key) is None
+ assert glide_sync_client.set(key, "") == OK
+ freq = glide_sync_client.object_freq(key)
+ assert freq is not None and freq >= 0
+ finally:
+ glide_sync_client.config_set({maxmemory_policy_key: maxmemory_policy})
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_object_idletime(self, glide_sync_client: GlideSync):
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+
+ assert glide_sync_client.object_idletime(non_existing_key) is None
+ assert glide_sync_client.set(string_key, "foo") == OK
+ time.sleep(2)
+ idletime = glide_sync_client.object_idletime(string_key)
+ assert idletime is not None and idletime > 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_object_refcount(self, glide_sync_client: GlideSync):
+ string_key = get_random_string(10)
+ non_existing_key = get_random_string(10)
+
+ assert glide_sync_client.object_refcount(non_existing_key) is None
+ assert glide_sync_client.set(string_key, "foo") == OK
+ refcount = glide_sync_client.object_refcount(string_key)
+ assert refcount is not None and refcount >= 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_load(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+
+ # verify function does not yet exist
+ assert glide_sync_client.function_list(lib_name) == []
+
+ assert glide_sync_client.function_load(code) == lib_name.encode()
+
+ assert glide_sync_client.fcall(func_name, arguments=["one", "two"]) == b"one"
+ assert (
+ glide_sync_client.fcall_ro(func_name, arguments=["one", "two"]) == b"one"
+ )
+
+ # verify with FUNCTION LIST
+ check_function_list_response(
+ glide_sync_client.function_list(lib_name, with_code=True),
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+
+ # re-load library without replace
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_load(code)
+ assert "Library '" + lib_name + "' already exists" in str(e)
+
+ # re-load library with replace
+ assert glide_sync_client.function_load(code, True) == lib_name.encode()
+
+ func2_name = f"myfunc2c{get_random_string(5)}"
+ new_code = f"""{code}\n redis.register_function({func2_name}, function(keys, args) return #args end)"""
+ new_code = generate_lua_lib_code(
+ lib_name, {func_name: "return args[1]", func2_name: "return #args"}, True
+ )
+
+ assert glide_sync_client.function_load(new_code, True) == lib_name.encode()
+
+ assert glide_sync_client.fcall(func2_name, arguments=["one", "two"]) == 2
+ assert glide_sync_client.fcall_ro(func2_name, arguments=["one", "two"]) == 2
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC) is OK
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ @pytest.mark.parametrize("single_route", [True, False])
+ def test_sync_function_load_cluster_with_route(
+ self, glide_sync_client: GlideClusterClient, single_route: bool
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+ route = SlotKeyRoute(SlotType.PRIMARY, "1") if single_route else AllPrimaries()
+
+ # verify function does not yet exist
+ function_list = glide_sync_client.function_list(lib_name, False, route)
+ if single_route:
+ assert function_list == []
+ else:
+ assert isinstance(function_list, dict)
+ for functions in function_list.values():
+ assert functions == []
+
+ assert glide_sync_client.function_load(code, False, route) == lib_name.encode()
+
+ result = glide_sync_client.fcall_route(
+ func_name, arguments=["one", "two"], route=route
+ )
+
+ if single_route:
+ assert result == b"one"
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ assert nodeResponse == b"one"
+
+ result = glide_sync_client.fcall_ro_route(
+ func_name, arguments=["one", "two"], route=route
+ )
+
+ if single_route:
+ assert result == b"one"
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ assert nodeResponse == b"one"
+
+ # verify with FUNCTION LIST
+ function_list = glide_sync_client.function_list(
+ lib_name, with_code=True, route=route
+ )
+ if single_route:
+ check_function_list_response(
+ function_list,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+ else:
+ assert isinstance(function_list, dict)
+ for nodeResponse in function_list.values():
+ check_function_list_response(
+ nodeResponse,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+
+ # re-load library without replace
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_load(code, False, route)
+ assert "Library '" + lib_name + "' already exists" in str(e)
+
+ # re-load library with replace
+ assert glide_sync_client.function_load(code, True, route) == lib_name.encode()
+
+ func2_name = f"myfunc2c{get_random_string(5)}"
+ new_code = f"""{code}\n redis.register_function({func2_name}, function(keys, args) return #args end)"""
+ new_code = generate_lua_lib_code(
+ lib_name, {func_name: "return args[1]", func2_name: "return #args"}, True
+ )
+
+ assert (
+ glide_sync_client.function_load(new_code, True, route) == lib_name.encode()
+ )
+
+ result = glide_sync_client.fcall_route(
+ func2_name, arguments=["one", "two"], route=route
+ )
+
+ if single_route:
+ assert result == 2
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ assert nodeResponse == 2
+
+ result = glide_sync_client.fcall_ro_route(
+ func2_name, arguments=["one", "two"], route=route
+ )
+
+ if single_route:
+ assert result == 2
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ assert nodeResponse == 2
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC, route) is OK
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_list(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ original_functions_count = len(glide_sync_client.function_list())
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+
+ # Assert function `lib_name` does not yet exist
+ assert glide_sync_client.function_list(lib_name) == []
+
+ # load library
+ glide_sync_client.function_load(code)
+
+ check_function_list_response(
+ glide_sync_client.function_list(lib_name.encode()),
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+ check_function_list_response(
+ glide_sync_client.function_list(f"{lib_name}*"),
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+ check_function_list_response(
+ glide_sync_client.function_list(lib_name, with_code=True),
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+
+ no_args_response = glide_sync_client.function_list()
+ wildcard_pattern_response = glide_sync_client.function_list(
+ "*".encode(), False
+ )
+ assert len(no_args_response) == original_functions_count + 1
+ assert len(wildcard_pattern_response) == original_functions_count + 1
+ check_function_list_response(
+ no_args_response,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+ check_function_list_response(
+ wildcard_pattern_response,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ @pytest.mark.parametrize("single_route", [True, False])
+ def test_sync_function_list_with_routing(
+ self, glide_sync_client: GlideClusterClient, single_route: bool
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ route = SlotKeyRoute(SlotType.PRIMARY, "1") if single_route else AllPrimaries()
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+
+ # Assert function `lib_name` does not yet exist
+ result = glide_sync_client.function_list(lib_name, route=route)
+ if single_route:
+ assert result == []
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ assert nodeResponse == []
+
+ # load library
+ glide_sync_client.function_load(code, route=route)
+
+ result = glide_sync_client.function_list(lib_name, route=route)
+ if single_route:
+ check_function_list_response(
+ result,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ check_function_list_response(
+ nodeResponse,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+
+ result = glide_sync_client.function_list(f"{lib_name}*", route=route)
+ if single_route:
+ check_function_list_response(
+ result,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ check_function_list_response(
+ nodeResponse,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ None,
+ )
+
+ result = glide_sync_client.function_list(lib_name, with_code=True, route=route)
+ if single_route:
+ check_function_list_response(
+ result,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+ else:
+ assert isinstance(result, dict)
+ for nodeResponse in result.values():
+ check_function_list_response(
+ nodeResponse,
+ lib_name,
+ {func_name: None},
+ {func_name: {b"no-writes"}},
+ code,
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_list_with_multiple_functions(
+ self, glide_sync_client: GlideSync
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ glide_sync_client.function_flush()
+ assert len(glide_sync_client.function_list()) == 0
+
+ lib_name_1 = f"mylib1C{get_random_string(5)}"
+ func_name_1 = f"myfunc1c{get_random_string(5)}"
+ func_name_2 = f"myfunc2c{get_random_string(5)}"
+ code_1 = generate_lua_lib_code(
+ lib_name_1,
+ {func_name_1: "return args[1]", func_name_2: "return args[2]"},
+ False,
+ )
+ glide_sync_client.function_load(code_1)
+
+ lib_name_2 = f"mylib2C{get_random_string(5)}"
+ func_name_3 = f"myfunc3c{get_random_string(5)}"
+ code_2 = generate_lua_lib_code(
+ lib_name_2, {func_name_3: "return args[3]"}, True
+ )
+ glide_sync_client.function_load(code_2)
+
+ no_args_response = glide_sync_client.function_list()
+
+ assert len(no_args_response) == 2
+ check_function_list_response(
+ no_args_response,
+ lib_name_1,
+ {func_name_1: None, func_name_2: None},
+ {func_name_1: set(), func_name_2: set()},
+ None,
+ )
+ check_function_list_response(
+ no_args_response,
+ lib_name_2,
+ {func_name_3: None},
+ {func_name_3: {b"no-writes"}},
+ None,
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_flush(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ pytest.skip(f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+
+ # Load the function
+ assert glide_sync_client.function_load(code) == lib_name.encode()
+
+ # verify function exists
+ assert len(glide_sync_client.function_list(lib_name)) == 1
+
+ # Flush functions
+ assert glide_sync_client.function_flush(FlushMode.SYNC) == OK
+ assert glide_sync_client.function_flush(FlushMode.ASYNC) == OK
+
+ # verify function is removed
+ assert len(glide_sync_client.function_list(lib_name)) == 0
+
+ # Attempt to re-load library without overwriting to ensure FLUSH was effective
+ assert glide_sync_client.function_load(code) == lib_name.encode()
+
+ # verify function exists
+ assert len(glide_sync_client.function_list(lib_name)) == 1
+
+ # Clean up by flushing functions again
+ glide_sync_client.function_flush()
+
    @pytest.mark.parametrize("cluster_mode", [True])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    @pytest.mark.parametrize("single_route", [True, False])
    def test_sync_function_flush_with_routing(
        self, glide_sync_client: GlideClusterClient, single_route: bool
    ):
        """FUNCTION FLUSH with an explicit route: either a single primary
        (SlotKeyRoute) or every primary (AllPrimaries)."""
        min_version = "7.0.0"
        if check_if_server_version_lt(glide_sync_client, min_version):
            pytest.skip(f"Valkey version required >= {min_version}")

        lib_name = f"mylib1C{get_random_string(5)}"
        func_name = f"myfunc1c{get_random_string(5)}"
        code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
        route = SlotKeyRoute(SlotType.PRIMARY, "1") if single_route else AllPrimaries()

        # Load the function
        assert glide_sync_client.function_load(code, False, route) == lib_name.encode()

        # verify function exists; a multi-node route returns a per-node dict
        result = glide_sync_client.function_list(lib_name, False, route)
        if single_route:
            assert len(result) == 1
        else:
            assert isinstance(result, dict)
            for nodeResponse in result.values():
                assert len(nodeResponse) == 1

        # Flush functions, once per flush mode
        assert glide_sync_client.function_flush(FlushMode.SYNC, route) == OK
        assert glide_sync_client.function_flush(FlushMode.ASYNC, route) == OK

        # verify function is removed on every routed node
        result = glide_sync_client.function_list(lib_name, False, route)
        if single_route:
            assert len(result) == 0
        else:
            assert isinstance(result, dict)
            for nodeResponse in result.values():
                assert len(nodeResponse) == 0

        # Attempt to re-load library without overwriting to ensure FLUSH was effective
        assert glide_sync_client.function_load(code, False, route) == lib_name.encode()

        # verify function exists
        result = glide_sync_client.function_list(lib_name, False, route)
        if single_route:
            assert len(result) == 1
        else:
            assert isinstance(result, dict)
            for nodeResponse in result.values():
                assert len(nodeResponse) == 1

        # Clean up by flushing functions again
        assert glide_sync_client.function_flush(route=route) == OK
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_delete(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ pytest.skip(f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
+
+ # Load the function
+ assert glide_sync_client.function_load(code) == lib_name.encode()
+
+ # verify function exists
+ assert len(glide_sync_client.function_list(lib_name)) == 1
+
+ # Delete the function
+ assert glide_sync_client.function_delete(lib_name) == OK
+
+ # verify function is removed
+ assert len(glide_sync_client.function_list(lib_name)) == 0
+
+ # deleting a non-existing library
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_delete(lib_name)
+ assert "Library not found" in str(e)
+
    @pytest.mark.parametrize("cluster_mode", [True])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    @pytest.mark.parametrize("single_route", [True, False])
    def test_sync_function_delete_with_routing(
        self, glide_sync_client: GlideClusterClient, single_route: bool
    ):
        """FUNCTION DELETE with an explicit route: a single primary or all
        primaries."""
        min_version = "7.0.0"
        if check_if_server_version_lt(glide_sync_client, min_version):
            pytest.skip(f"Valkey version required >= {min_version}")

        lib_name = f"mylib1C{get_random_string(5)}"
        func_name = f"myfunc1c{get_random_string(5)}"
        code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, True)
        route = SlotKeyRoute(SlotType.PRIMARY, "1") if single_route else AllPrimaries()

        # Load the function
        assert glide_sync_client.function_load(code, False, route) == lib_name.encode()

        # verify function exists; a multi-node route returns a per-node dict
        result = glide_sync_client.function_list(lib_name, False, route)
        if single_route:
            assert len(result) == 1
        else:
            assert isinstance(result, dict)
            for nodeResponse in result.values():
                assert len(nodeResponse) == 1

        # Delete the function
        assert glide_sync_client.function_delete(lib_name, route) == OK

        # verify function is removed on every routed node
        result = glide_sync_client.function_list(lib_name, False, route)
        if single_route:
            assert len(result) == 0
        else:
            assert isinstance(result, dict)
            for nodeResponse in result.values():
                assert len(nodeResponse) == 0

        # deleting a non-existing library (default routing) must fail
        with pytest.raises(RequestError) as e:
            glide_sync_client.function_delete(lib_name)
        assert "Library not found" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_stats(self, glide_sync_client: GlideSync):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = "functionStats_without_route"
+ func_name = lib_name
+ assert glide_sync_client.function_flush(FlushMode.SYNC) == OK
+
+ # function $funcName returns first argument
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, False)
+ assert glide_sync_client.function_load(code, True) == lib_name.encode()
+
+ response = glide_sync_client.function_stats()
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 1, 1
+ )
+
+ code = generate_lua_lib_code(
+ lib_name + "_2",
+ {func_name + "_2": "return 'OK'", func_name + "_3": "return 42"},
+ False,
+ )
+ assert (
+ glide_sync_client.function_load(code, True) == (lib_name + "_2").encode()
+ )
+
+ response = glide_sync_client.function_stats()
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 2, 3
+ )
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC) == OK
+
+ response = glide_sync_client.function_stats()
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 0, 0
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [False, True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_stats_running_script(
+ self, request, cluster_mode, protocol, glide_sync_client: GlideSync
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = create_lua_lib_with_long_running_function(lib_name, func_name, 10, True)
+
+ # load the library
+ assert glide_sync_client.function_load(code, replace=True) == lib_name.encode()
+
+ # create a second client to run fcall
+ test_sync_client = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000
+ )
+
+ test_sync_client2 = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000
+ )
+
+ def endless_fcall_route_call():
+ test_sync_client.fcall_ro(func_name, arguments=[])
+
+ def wait_and_function_stats():
+ # it can take a few seconds for FCALL to register as running
+ asyncio.sleep(3)
+ result = test_sync_client2.function_stats()
+ running_scripts = False
+ for res in result.values():
+ if res.get(b"running_script"):
+ if running_scripts:
+ raise Exception("Already running script on a different node")
+ running_scripts = True
+ assert res.get(b"running_script").get(b"name") == func_name.encode()
+ assert res.get(b"running_script").get(b"command") == [
+ b"FCALL_RO",
+ func_name.encode(),
+ b"0",
+ ]
+ assert res.get(b"running_script").get(b"duration_ms") > 0
+
+ assert running_scripts
+
+ asyncio.gather(
+ endless_fcall_route_call(),
+ wait_and_function_stats(),
+ )
+
+ test_sync_client.close()
+ test_sync_client2.close()
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ @pytest.mark.parametrize("single_route", [True, False])
+ def test_sync_function_stats_with_routing(
+ self, glide_sync_client: GlideClusterClient, single_route: bool
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ route = (
+ SlotKeyRoute(SlotType.PRIMARY, get_random_string(10))
+ if single_route
+ else AllPrimaries()
+ )
+ lib_name = "functionStats_with_route_" + str(single_route)
+ func_name = lib_name
+ assert glide_sync_client.function_flush(FlushMode.SYNC, route) == OK
+
+ # function $funcName returns first argument
+ code = generate_lua_lib_code(lib_name, {func_name: "return args[1]"}, False)
+ assert glide_sync_client.function_load(code, True, route) == lib_name.encode()
+
+ response = glide_sync_client.function_stats(route)
+ if single_route:
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, response), [], 1, 1
+ )
+ else:
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 1, 1
+ )
+
+ code = generate_lua_lib_code(
+ lib_name + "_2",
+ {func_name + "_2": "return 'OK'", func_name + "_3": "return 42"},
+ False,
+ )
+ assert (
+ glide_sync_client.function_load(code, True, route)
+ == (lib_name + "_2").encode()
+ )
+
+ response = glide_sync_client.function_stats(route)
+ if single_route:
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, response), [], 2, 3
+ )
+ else:
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 2, 3
+ )
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC, route) == OK
+
+ response = glide_sync_client.function_stats(route)
+ if single_route:
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, response), [], 0, 0
+ )
+ else:
+ for node_response in response.values():
+ check_function_stats_response(
+ cast(TFunctionStatsSingleNodeResponse, node_response), [], 0, 0
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_kill_no_write(
+ self, request, cluster_mode, protocol, glide_sync_client: GlideSync
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = create_lua_lib_with_long_running_function(lib_name, func_name, 10, True)
+
+ # nothing to kill
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_kill()
+ assert "NotBusy" in str(e)
+
+ # load the library
+ assert glide_sync_client.function_load(code, replace=True) == lib_name.encode()
+
+ # create a second client to run fcall
+ test_sync_client = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000
+ )
+
+ def endless_fcall_route_call():
+ # fcall is supposed to be killed, and will return a RequestError
+ with pytest.raises(RequestError) as e:
+ test_sync_client.fcall_ro(func_name, arguments=[])
+ assert "Script killed by user" in str(e)
+
+ def wait_and_function_kill():
+ # it can take a few seconds for FCALL to register as running
+ asyncio.sleep(3)
+ timeout = 0
+ while timeout <= 5:
+ # keep trying to kill until we get an "OK"
+ try:
+ result = glide_sync_client.function_kill()
+ # we expect to get success
+ assert result == "OK"
+ break
+ except RequestError:
+ # a RequestError may occur if the function is not yet running
+ # sleep and try again
+ timeout += 0.5
+ asyncio.sleep(0.5)
+
+ asyncio.gather(
+ endless_fcall_route_call(),
+ wait_and_function_kill(),
+ )
+
+ # no functions running so we get notbusy error again
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.function_kill()
+ assert "NotBusy" in str(e)
+ test_sync_client.close()
+
+ @pytest.mark.parametrize("cluster_mode", [False, True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_kill_write_is_unkillable(
+ self, request, cluster_mode, protocol, glide_sync_client: GlideSync
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = create_lua_lib_with_long_running_function(lib_name, func_name, 10, False)
+
+ # load the library on all primaries
+ assert glide_sync_client.function_load(code, replace=True) == lib_name.encode()
+
+ # create a second client to run fcall - and give it a long timeout
+ test_sync_client = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000
+ )
+
+ # call fcall to run the function loaded function
+ def endless_fcall_route_call():
+ # fcall won't be killed, because kill only works against fcalls that don't make a write operation
+ # use fcall(key) so that it makes a write operation
+ test_sync_client.fcall(func_name, keys=[lib_name])
+
+ def wait_and_function_kill():
+ # it can take a few seconds for FCALL to register as running
+ asyncio.sleep(3)
+ timeout = 0
+ foundUnkillable = False
+ while timeout <= 5:
+ # keep trying to kill until we get a unkillable return error
+ try:
+ glide_sync_client.function_kill()
+ except RequestError as e:
+ if "UNKILLABLE" in str(e):
+ foundUnkillable = True
+ break
+ timeout += 0.5
+ asyncio.sleep(0.5)
+ # expect an unkillable error
+ assert foundUnkillable
+
+ asyncio.gather(
+ endless_fcall_route_call(),
+ wait_and_function_kill(),
+ )
+ test_sync_client.close()
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_fcall_with_key(self, glide_sync_client: GlideClusterClient):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = f"{{testKey}}:1-{get_random_string(10)}"
+ key2 = f"{{testKey}}:2-{get_random_string(10)}"
+ keys: List[TEncodable] = [key1, key2]
+ route = SlotKeyRoute(SlotType.PRIMARY, key1)
+ lib_name = f"mylib1C{get_random_string(5)}"
+ func_name = f"myfunc1c{get_random_string(5)}"
+ code = generate_lua_lib_code(lib_name, {func_name: "return keys[1]"}, True)
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC, route) is OK
+ assert glide_sync_client.function_load(code, False, route) == lib_name.encode()
+
+ assert (
+ glide_sync_client.fcall(func_name, keys=keys, arguments=[])
+ == key1.encode()
+ )
+
+ assert (
+ glide_sync_client.fcall_ro(func_name, keys=keys, arguments=[])
+ == key1.encode()
+ )
+
+ transaction = ClusterTransaction()
+
+ transaction.fcall(func_name, keys=keys, arguments=[])
+ transaction.fcall_ro(func_name, keys=keys, arguments=[])
+
+ # check response from a routed transaction request
+ result = glide_sync_client.exec(transaction, route)
+ assert result is not None
+ assert result[0] == key1.encode()
+ assert result[1] == key1.encode()
+
+ # if no route given, GLIDE should detect it automatically
+ result = glide_sync_client.exec(transaction)
+ assert result is not None
+ assert result[0] == key1.encode()
+ assert result[1] == key1.encode()
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC, route) is OK
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_fcall_readonly_function(self, glide_sync_client: GlideClusterClient):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ lib_name = f"fcall_readonly_function{get_random_string(5)}"
+ # intentionally using a REPLICA route
+ replicaRoute = SlotKeyRoute(SlotType.REPLICA, lib_name)
+ primaryRoute = SlotKeyRoute(SlotType.PRIMARY, lib_name)
+ func_name = f"fcall_readonly_function{get_random_string(5)}"
+
+ # function $funcName returns a magic number
+ code = generate_lua_lib_code(lib_name, {func_name: "return 42"}, False)
+
+ assert glide_sync_client.function_load(code, False) == lib_name.encode()
+
+ # On a replica node should fail, because a function isn't guaranteed to be RO
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.fcall_route(
+ func_name, arguments=[], route=replicaRoute
+ )
+ assert "You can't write against a read only replica." in str(e)
+
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.fcall_ro_route(
+ func_name, arguments=[], route=replicaRoute
+ )
+ assert "You can't write against a read only replica." in str(e)
+
+ # fcall_ro also fails to run it even on primary - another error
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.fcall_ro_route(
+ func_name, arguments=[], route=primaryRoute
+ )
+ assert "Can not execute a script with write flag using *_ro command." in str(e)
+
+ # create the same function, but with RO flag
+ code = generate_lua_lib_code(lib_name, {func_name: "return 42"}, True)
+ assert glide_sync_client.function_load(code, True) == lib_name.encode()
+
+ # fcall should succeed now
+ assert (
+ glide_sync_client.fcall_ro_route(
+ func_name, arguments=[], route=replicaRoute
+ )
+ == 42
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_dump_restore_standalone(self, glide_sync_client: GlideClient):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC) is OK
+
+ # Dump an empty lib
+ emptyDump = glide_sync_client.function_dump()
+ assert emptyDump is not None and len(emptyDump) > 0
+
+ name1 = f"Foster{get_random_string(5)}"
+ name2 = f"Dogster{get_random_string(5)}"
+
+ # function name1 returns first argument; function name2 returns argument array len
+ code = generate_lua_lib_code(
+ name1, {name1: "return args[1]", name2: "return #args"}, False
+ )
+ assert glide_sync_client.function_load(code, True) == name1.encode()
+ flist = glide_sync_client.function_list(with_code=True)
+
+ dump = glide_sync_client.function_dump()
+ assert dump is not None
+
+ # restore without cleaning the lib and/or overwrite option causes an error
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.function_restore(dump)
+ assert "already exists" in str(e)
+
+ # APPEND policy also fails for the same reason (name collision)
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.function_restore(
+ dump, FunctionRestorePolicy.APPEND
+ )
+ assert "already exists" in str(e)
+
+ # REPLACE policy succeed
+ assert (
+ glide_sync_client.function_restore(dump, FunctionRestorePolicy.REPLACE)
+ is OK
+ )
+
+ # but nothing changed - all code overwritten
+ assert glide_sync_client.function_list(with_code=True) == flist
+
+ # create lib with another name, but with the same function names
+ assert glide_sync_client.function_flush(FlushMode.SYNC) is OK
+ code = generate_lua_lib_code(
+ name2, {name1: "return args[1]", name2: "return #args"}, False
+ )
+ assert glide_sync_client.function_load(code, True) == name2.encode()
+
+ # REPLACE policy now fails due to a name collision
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_restore(dump, FunctionRestorePolicy.REPLACE)
+ assert "already exists" in str(e)
+
+ # FLUSH policy succeeds, but deletes the second lib
+ assert (
+ glide_sync_client.function_restore(dump, FunctionRestorePolicy.FLUSH) is OK
+ )
+ assert glide_sync_client.function_list(with_code=True) == flist
+
+ # call restored functions
+ assert (
+ glide_sync_client.fcall(name1, arguments=["meow", "woem"])
+ == "meow".encode()
+ )
+ assert glide_sync_client.fcall(name2, arguments=["meow", "woem"]) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_function_dump_restore_cluster(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ assert glide_sync_client.function_flush(FlushMode.SYNC) is OK
+
+ # Dump an empty lib
+ emptyDump = glide_sync_client.function_dump()
+ assert emptyDump is not None and len(emptyDump) > 0
+
+ name1 = f"Foster{get_random_string(5)}"
+ libname1 = f"FosterLib{get_random_string(5)}"
+ name2 = f"Dogster{get_random_string(5)}"
+ libname2 = f"DogsterLib{get_random_string(5)}"
+
+ # function name1 returns first argument; function name2 returns argument array len
+ code = generate_lua_lib_code(
+ libname1, {name1: "return args[1]", name2: "return #args"}, True
+ )
+ assert glide_sync_client.function_load(code, True) == libname1.encode()
+ flist = glide_sync_client.function_list(with_code=True)
+ dump = glide_sync_client.function_dump(RandomNode())
+ assert dump is not None and isinstance(dump, bytes)
+
+ # restore without cleaning the lib and/or overwrite option causes an error
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.function_restore(dump)
+ assert "already exists" in str(e)
+
+ # APPEND policy also fails for the same reason (name collision)
+ with pytest.raises(RequestError) as e:
+ assert glide_sync_client.function_restore(
+ dump, FunctionRestorePolicy.APPEND
+ )
+ assert "already exists" in str(e)
+
+ # REPLACE policy succeed
+ assert (
+ glide_sync_client.function_restore(
+ dump, FunctionRestorePolicy.REPLACE, route=AllPrimaries()
+ )
+ is OK
+ )
+
+ # but nothing changed - all code overwritten
+ restoredFunctionList = glide_sync_client.function_list(with_code=True)
+ assert restoredFunctionList is not None
+ assert isinstance(restoredFunctionList, List) and len(restoredFunctionList) == 1
+ assert restoredFunctionList[0]["library_name".encode()] == libname1.encode()
+
+ # Note that function ordering may differ across nodes so we can't do a deep equals
+ assert len(restoredFunctionList[0]["functions".encode()]) == 2
+
+ # create lib with another name, but with the same function names
+ assert glide_sync_client.function_flush(FlushMode.SYNC) is OK
+ code = generate_lua_lib_code(
+ libname2, {name1: "return args[1]", name2: "return #args"}, True
+ )
+ assert glide_sync_client.function_load(code, True) == libname2.encode()
+ restoredFunctionList = glide_sync_client.function_list(with_code=True)
+ assert restoredFunctionList is not None
+ assert isinstance(restoredFunctionList, List) and len(restoredFunctionList) == 1
+ assert restoredFunctionList[0]["library_name".encode()] == libname2.encode()
+
+ # REPLACE policy now fails due to a name collision
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.function_restore(dump, FunctionRestorePolicy.REPLACE)
+ assert "already exists" in str(e)
+
+ # FLUSH policy succeeds, but deletes the second lib
+ assert (
+ glide_sync_client.function_restore(dump, FunctionRestorePolicy.FLUSH) is OK
+ )
+ restoredFunctionList = glide_sync_client.function_list(with_code=True)
+ assert restoredFunctionList is not None
+ assert isinstance(restoredFunctionList, List) and len(restoredFunctionList) == 1
+ assert restoredFunctionList[0]["library_name".encode()] == libname1.encode()
+
+ # Note that function ordering may differ across nodes so we can't do a deep equals
+ assert len(restoredFunctionList[0]["functions".encode()]) == 2
+
+ # call restored functions
+ assert (
+ glide_sync_client.fcall_ro(name1, arguments=["meow", "woem"])
+ == "meow".encode()
+ )
+ assert glide_sync_client.fcall_ro(name2, arguments=["meow", "woem"]) == 2
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_srandmember(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ elements: List[TEncodable] = ["one", "two"]
+ assert glide_sync_client.sadd(key, elements) == 2
+
+ member = glide_sync_client.srandmember(key)
+ # TODO: remove when function signature is fixed
+ assert isinstance(member, bytes)
+ assert member.decode() in elements
+ assert glide_sync_client.srandmember("non_existing_key") is None
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.srandmember(string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_srandmember_count(self, glide_sync_client: GlideSync):
+ key = get_random_string(10)
+ string_key = get_random_string(10)
+ elements: List[TEncodable] = ["one", "two"]
+ assert glide_sync_client.sadd(key, elements) == 2
+
+ # unique values are expected as count is positive
+ members = glide_sync_client.srandmember_count(key, 4)
+ assert len(members) == 2
+ assert set(members) == {b"one", b"two"}
+
+ # duplicate values are expected as count is negative
+ members = glide_sync_client.srandmember_count(key, -4)
+ assert len(members) == 4
+ for member in members:
+ # TODO: remove when function signature is fixed
+ assert isinstance(member, bytes)
+ assert member.decode() in elements
+
+ # empty return values for non-existing or empty keys
+ assert glide_sync_client.srandmember_count(key, 0) == []
+ assert glide_sync_client.srandmember_count("non_existing_key", 0) == []
+
+ # key exists, but it is not a set
+ assert glide_sync_client.set(string_key, "value") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.srandmember_count(string_key, 8)
+
    @pytest.mark.parametrize("cluster_mode", [True, False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_flushall(self, glide_sync_client: GlideSync):
        """FLUSHALL empties the keyspace in default, ASYNC and (6.2+) SYNC modes,
        with and without an explicit route on cluster clients."""
        min_version = "6.2.0"
        key = f"{{key}}-1{get_random_string(5)}"
        value = get_random_string(5)

        glide_sync_client.set(key, value)
        assert glide_sync_client.dbsize() > 0
        assert glide_sync_client.flushall() == OK
        assert glide_sync_client.flushall(FlushMode.ASYNC) == OK
        # SYNC flush mode is only available from Valkey/Redis 6.2.0
        if not check_if_server_version_lt(glide_sync_client, min_version):
            assert glide_sync_client.flushall(FlushMode.SYNC) == OK
        assert glide_sync_client.dbsize() == 0

        # repeat with explicit routing when talking to a cluster
        if isinstance(glide_sync_client, GlideClusterClient):
            glide_sync_client.set(key, value)
            assert glide_sync_client.flushall(route=AllPrimaries()) == OK
            assert glide_sync_client.flushall(FlushMode.ASYNC, AllPrimaries()) == OK
            if not check_if_server_version_lt(glide_sync_client, min_version):
                assert glide_sync_client.flushall(FlushMode.SYNC, AllPrimaries()) == OK
            assert glide_sync_client.dbsize() == 0
+
    @pytest.mark.parametrize("cluster_mode", [False])
    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
    def test_sync_standalone_flushdb(self, glide_sync_client: GlideClient):
        """FLUSHDB clears only the currently selected database (standalone only)."""
        min_version = "6.2.0"
        key1 = f"{{key}}-1{get_random_string(5)}"
        key2 = f"{{key}}-2{get_random_string(5)}"
        value = get_random_string(5)

        # fill DB 0 and check size non-empty
        assert glide_sync_client.select(0) == OK
        glide_sync_client.set(key1, value)
        assert glide_sync_client.dbsize() > 0

        # fill DB 1 and check size non-empty
        assert glide_sync_client.select(1) == OK
        glide_sync_client.set(key2, value)
        assert glide_sync_client.dbsize() > 0

        # flush DB 1 (currently selected) and check again
        assert glide_sync_client.flushdb() == OK
        assert glide_sync_client.dbsize() == 0

        # switch to DB 0, flush, and check; DB 0 was untouched by the flush above
        assert glide_sync_client.select(0) == OK
        assert glide_sync_client.dbsize() > 0
        assert glide_sync_client.flushdb(FlushMode.ASYNC) == OK
        assert glide_sync_client.dbsize() == 0

        # verify flush SYNC (mode available from Valkey/Redis 6.2.0)
        if not check_if_server_version_lt(glide_sync_client, min_version):
            glide_sync_client.set(key2, value)
            assert glide_sync_client.dbsize() > 0
            assert glide_sync_client.flushdb(FlushMode.SYNC) == OK
            assert glide_sync_client.dbsize() == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_getex(self, glide_sync_client: GlideSync):
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ key1 = get_random_string(10)
+ non_existing_key = get_random_string(10)
+ value = get_random_string(10)
+ value_encoded = value.encode()
+
+ assert glide_sync_client.set(key1, value) == OK
+ assert glide_sync_client.getex(non_existing_key) is None
+ assert glide_sync_client.getex(key1) == value_encoded
+ assert glide_sync_client.ttl(key1) == -1
+
+ # setting expiration timer
+ assert (
+ glide_sync_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.MILLSEC, 50))
+ == value_encoded
+ )
+ assert glide_sync_client.ttl(key1) != -1
+
+ # setting and clearing expiration timer
+ assert glide_sync_client.set(key1, value) == OK
+ assert (
+ glide_sync_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.SEC, 10))
+ == value_encoded
+ )
+ assert (
+ glide_sync_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.PERSIST, None))
+ == value_encoded
+ )
+ assert glide_sync_client.ttl(key1) == -1
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_copy_no_database(self, glide_sync_client: GlideSync):
+ """COPY within a single database: missing keys, first copy, and REPLACE semantics."""
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ # the {testKey} hashtag keeps source and destination in the same slot in cluster mode
+ source = f"{{testKey}}:1-{get_random_string(10)}"
+ destination = f"{{testKey}}:2-{get_random_string(10)}"
+ value1 = get_random_string(5)
+ value2 = get_random_string(5)
+ value1_encoded = value1.encode()
+
+ # neither key exists
+ assert glide_sync_client.copy(source, destination, replace=False) is False
+ assert glide_sync_client.copy(source, destination) is False
+
+ # source exists, destination does not
+ glide_sync_client.set(source, value1)
+ assert glide_sync_client.copy(source, destination, replace=False) is True
+ assert glide_sync_client.get(destination) == value1_encoded
+
+ # new value for source key
+ glide_sync_client.set(source, value2)
+
+ # both exists, no REPLACE: copy is refused and destination keeps the old value
+ assert glide_sync_client.copy(source, destination) is False
+ assert glide_sync_client.copy(source, destination, replace=False) is False
+ assert glide_sync_client.get(destination) == value1_encoded
+
+ # both exists, with REPLACE
+ assert glide_sync_client.copy(source, destination, replace=True) is True
+ assert glide_sync_client.get(destination) == value2.encode()
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_copy_database(self, glide_sync_client: GlideClient):
+ """COPY across databases (standalone only): DB argument, REPLACE, and invalid index."""
+ min_version = "6.2.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ source = get_random_string(10)
+ destination = get_random_string(10)
+ value1 = get_random_string(5)
+ value2 = get_random_string(5)
+ value1_encoded = value1.encode()
+ value2_encoded = value2.encode()
+ index0 = 0
+ index1 = 1
+ index2 = 2
+
+ # the test hops between DBs with SELECT; the finally block restores DB 0
+ # so later tests on the shared client are unaffected
+ try:
+ assert glide_sync_client.select(index0) == OK
+
+ # neither key exists
+ assert (
+ glide_sync_client.copy(source, destination, index1, replace=False)
+ is False
+ )
+
+ # source exists, destination does not
+ glide_sync_client.set(source, value1)
+ assert (
+ glide_sync_client.copy(source, destination, index1, replace=False)
+ is True
+ )
+ assert glide_sync_client.select(index1) == OK
+ assert glide_sync_client.get(destination) == value1_encoded
+
+ # new value for source key
+ assert glide_sync_client.select(index0) == OK
+ glide_sync_client.set(source, value2)
+
+ # no REPLACE, copying to existing key on DB 0 & 1, non-existing key on DB 2
+ assert (
+ glide_sync_client.copy(source, destination, index1, replace=False)
+ is False
+ )
+ assert (
+ glide_sync_client.copy(source, destination, index2, replace=False)
+ is True
+ )
+
+ # new value only gets copied to DB 2
+ assert glide_sync_client.select(index1) == OK
+ assert glide_sync_client.get(destination) == value1_encoded
+ assert glide_sync_client.select(index2) == OK
+ assert glide_sync_client.get(destination) == value2_encoded
+
+ # both exists, with REPLACE, when value isn't the same, source always get copied to destination
+ assert glide_sync_client.select(index0) == OK
+ assert (
+ glide_sync_client.copy(source, destination, index1, replace=True)
+ is True
+ )
+ assert glide_sync_client.select(index1) == OK
+ assert glide_sync_client.get(destination) == value2_encoded
+
+ # invalid DB index
+ with pytest.raises(RequestError):
+ glide_sync_client.copy(source, destination, -1, replace=True)
+ finally:
+ assert glide_sync_client.select(0) == OK
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_wait(self, glide_sync_client: GlideSync):
+ """WAIT returns the number of replicas that acknowledged the preceding writes."""
+ key = f"{{key}}-1{get_random_string(5)}"
+ value = get_random_string(5)
+ value2 = get_random_string(5)
+
+ assert glide_sync_client.set(key, value) == OK
+ if isinstance(glide_sync_client, GlideClusterClient):
+ # cluster setup is expected to have at least one replica per shard
+ assert glide_sync_client.wait(1, 1000) >= 1
+ else:
+ # standalone may have no replicas, so 0 is acceptable
+ assert glide_sync_client.wait(1, 1000) >= 0
+
+ # ensure that command doesn't time out even if timeout > request timeout (250ms by default)
+ assert glide_sync_client.set(key, value2) == OK
+ assert glide_sync_client.wait(100, 500) >= 0
+
+ # command should fail on a negative timeout value
+ with pytest.raises(RequestError):
+ glide_sync_client.wait(1, -1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lolwut(self, glide_sync_client: GlideSync):
+ """LOLWUT with version/parameter combinations; output always embeds the version banner."""
+ result = glide_sync_client.lolwut()
+ assert b"Redis ver. " in result
+ result = glide_sync_client.lolwut(parameters=[])
+ assert b"Redis ver. " in result
+ result = glide_sync_client.lolwut(parameters=[50, 20])
+ assert b"Redis ver. " in result
+ result = glide_sync_client.lolwut(6)
+ assert b"Redis ver. " in result
+ result = glide_sync_client.lolwut(5, [30, 4, 4])
+ assert b"Redis ver. " in result
+
+ if isinstance(glide_sync_client, GlideClusterClient):
+ # test with multi-node route: response is a per-node dict
+ result = glide_sync_client.lolwut(route=AllNodes())
+ assert isinstance(result, dict)
+ result_decoded = cast(dict, convert_bytes_to_string_object(result))
+ assert result_decoded is not None
+ for node_result in result_decoded.values():
+ assert "Redis ver. " in node_result
+
+ result = glide_sync_client.lolwut(parameters=[10, 20], route=AllNodes())
+ assert isinstance(result, dict)
+ result_decoded = cast(dict, convert_bytes_to_string_object(result))
+ assert result_decoded is not None
+ for node_result in result_decoded.values():
+ assert "Redis ver. " in node_result
+
+ # test with single-node route: response is raw bytes, not a dict
+ result = glide_sync_client.lolwut(2, route=RandomNode())
+ assert isinstance(result, bytes)
+ assert b"Redis ver. " in result
+
+ result = glide_sync_client.lolwut(2, [10, 20], RandomNode())
+ assert isinstance(result, bytes)
+ assert b"Redis ver. " in result
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_client_random_key(self, glide_sync_client: GlideClusterClient):
+ """RANDOMKEY on a cluster: None when empty, the sole key otherwise (any route)."""
+ key = get_random_string(10)
+
+ # setup: delete all keys
+ assert glide_sync_client.flushall(FlushMode.SYNC)
+
+ # no keys exists, so random_key returns None
+ assert glide_sync_client.random_key() is None
+
+ assert glide_sync_client.set(key, "foo") == OK
+ # `key` should be the only existing key, so random_key should return `key`
+ assert glide_sync_client.random_key() == key.encode()
+ assert glide_sync_client.random_key(AllPrimaries()) == key.encode()
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_standalone_client_random_key(self, glide_sync_client: GlideClient):
+ """RANDOMKEY is scoped to the currently SELECTed database (standalone only)."""
+ key = get_random_string(10)
+
+ # setup: delete all keys in DB 0 and DB 1
+ assert glide_sync_client.select(0) == OK
+ assert glide_sync_client.flushdb(FlushMode.SYNC) == OK
+ assert glide_sync_client.select(1) == OK
+ assert glide_sync_client.flushdb(FlushMode.SYNC) == OK
+
+ # no keys exist so random_key returns None
+ assert glide_sync_client.random_key() is None
+ # set `key` in DB 1 (still selected from the setup above)
+ assert glide_sync_client.set(key, "foo") == OK
+ # `key` should be the only key in the database
+ assert glide_sync_client.random_key() == key.encode()
+
+ # switch back to DB 0
+ assert glide_sync_client.select(0) == OK
+ # DB 0 should still have no keys, so random_key should still return None
+ assert glide_sync_client.random_key() is None
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_dump_restore(self, glide_sync_client: GlideSync):
+ """DUMP / RESTORE round-trip, plus the two documented failure modes."""
+ key1 = f"{{key}}-1{get_random_string(10)}"
+ key2 = f"{{key}}-2{get_random_string(10)}"
+ key3 = f"{{key}}-3{get_random_string(10)}"
+ nonExistingKey = f"{{key}}-4{get_random_string(10)}"
+ value = get_random_string(5)
+
+ glide_sync_client.set(key1, value)
+
+ # Dump an existing key
+ bytesData = glide_sync_client.dump(key1)
+ assert bytesData is not None
+
+ # Dump non-existing key
+ assert glide_sync_client.dump(nonExistingKey) is None
+
+ # Restore to a new key (ttl=0 means no expiry) and verify its value
+ assert glide_sync_client.restore(key2, 0, bytesData) == OK
+ newValue = glide_sync_client.get(key2)
+ assert newValue == value.encode()
+
+ # Restore to an existing key fails without REPLACE
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.restore(key2, 0, bytesData)
+ assert "Target key name already exists" in str(e)
+
+ # Restore using a value with checksum error (a raw string is not a valid DUMP payload)
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.restore(key3, 0, value.encode())
+ assert "payload version or checksum are wrong" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_dump_restore_options(self, glide_sync_client: GlideSync):
+ """RESTORE option matrix: REPLACE, ABSTTL, IDLETIME, FREQ — valid and invalid values."""
+ key1 = f"{{key}}-1{get_random_string(10)}"
+ key2 = f"{{key}}-2{get_random_string(10)}"
+ key3 = f"{{key}}-3{get_random_string(10)}"
+ value = get_random_string(5)
+
+ glide_sync_client.set(key1, value)
+
+ # Dump an existing key
+ bytesData = glide_sync_client.dump(key1)
+ assert bytesData is not None
+
+ # Restore without option
+ assert glide_sync_client.restore(key2, 0, bytesData) == OK
+
+ # Restore with REPLACE option
+ assert glide_sync_client.restore(key2, 0, bytesData, replace=True) == OK
+
+ # Restore to an existing key holding different value (a set) with REPLACE option
+ assert glide_sync_client.sadd(key3, ["a"]) == 1
+ assert glide_sync_client.restore(key3, 0, bytesData, replace=True) == OK
+
+ # Restore with REPLACE, ABSTTL, and positive TTL
+ assert (
+ glide_sync_client.restore(key2, 1000, bytesData, replace=True, absttl=True)
+ == OK
+ )
+
+ # Restore with REPLACE, ABSTTL, and negative TTL
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.restore(key2, -10, bytesData, replace=True, absttl=True)
+ assert "Invalid TTL value" in str(e)
+
+ # Restore with REPLACE and positive idletime
+ assert (
+ glide_sync_client.restore(key2, 0, bytesData, replace=True, idletime=10)
+ == OK
+ )
+
+ # Restore with REPLACE and negative idletime
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.restore(key2, 0, bytesData, replace=True, idletime=-10)
+ assert "Invalid IDLETIME value" in str(e)
+
+ # Restore with REPLACE and positive frequency
+ assert (
+ glide_sync_client.restore(key2, 0, bytesData, replace=True, frequency=10)
+ == OK
+ )
+
+ # Restore with REPLACE and negative frequency
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.restore(key2, 0, bytesData, replace=True, frequency=-10)
+ assert "Invalid FREQ value" in str(e)
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lcs(self, glide_sync_client: GlideClient):
+ """LCS returns the longest common subsequence; empty for a missing key, error for non-strings."""
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ key1 = "testKey1"
+ value1 = "abcd"
+ key2 = "testKey2"
+ value2 = "axcd"
+ nonexistent_key = "nonexistent_key"
+ # subsequence (not substring): "acd" is common to "abcd" and "axcd"
+ expected_subsequence = "acd"
+ expected_subsequence_with_nonexistent_key = ""
+ assert glide_sync_client.mset({key1: value1, key2: value2}) == OK
+ assert glide_sync_client.lcs(key1, key2) == expected_subsequence.encode()
+ assert (
+ glide_sync_client.lcs(key1, nonexistent_key)
+ == expected_subsequence_with_nonexistent_key.encode()
+ )
+ # LCS is only defined for string values
+ lcs_non_string_key = "lcs_non_string_key"
+ assert glide_sync_client.sadd(lcs_non_string_key, ["Hello", "world"]) == 2
+ with pytest.raises(RequestError):
+ glide_sync_client.lcs(key1, lcs_non_string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lcs_len(self, glide_sync_client: GlideClient):
+ """LCS LEN returns the subsequence length; 0 for a missing key, error for non-strings."""
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ key1 = "testKey1"
+ value1 = "abcd"
+ key2 = "testKey2"
+ value2 = "axcd"
+ nonexistent_key = "nonexistent_key"
+ # len("acd") — the LCS of "abcd" and "axcd"
+ expected_subsequence_length = 3
+ expected_subsequence_length_with_nonexistent_key = 0
+ assert glide_sync_client.mset({key1: value1, key2: value2}) == OK
+ assert glide_sync_client.lcs_len(key1, key2) == expected_subsequence_length
+ assert (
+ glide_sync_client.lcs_len(key1, nonexistent_key)
+ == expected_subsequence_length_with_nonexistent_key
+ )
+ lcs_non_string_key = "lcs_non_string_key"
+ assert glide_sync_client.sadd(lcs_non_string_key, ["Hello", "world"]) == 2
+ with pytest.raises(RequestError):
+ glide_sync_client.lcs_len(key1, lcs_non_string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lcs_idx(self, glide_sync_client: GlideClient):
+ """LCS IDX: match-position output with min_match_len / with_match_len combinations.
+
+ Each match is a pair of [start, end] index ranges (key1 range, key2 range),
+ optionally followed by the match length when with_match_len is set.
+ """
+ min_version = "7.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ key1 = "testKey1"
+ value1 = "abcd1234"
+ key2 = "testKey2"
+ value2 = "bcdef1234"
+ nonexistent_key = "nonexistent_key"
+ # two matches: "1234" (len 4) and "bcd" (len 3); total LCS length is 7
+ expected_response_no_min_match_len_no_with_match_len = {
+ b"matches": [
+ [
+ [4, 7],
+ [5, 8],
+ ],
+ [
+ [1, 3],
+ [0, 2],
+ ],
+ ],
+ b"len": 7,
+ }
+ # min_match_len=4 filters out the 3-char "bcd" match (reported len is unchanged)
+ expected_response_with_min_match_len_equals_four_no_with_match_len = {
+ b"matches": [
+ [
+ [4, 7],
+ [5, 8],
+ ],
+ ],
+ b"len": 7,
+ }
+ # with_match_len appends each match's own length to its entry
+ expected_response_no_min_match_len_with_match_len = {
+ b"matches": [
+ [
+ [4, 7],
+ [5, 8],
+ 4,
+ ],
+ [
+ [1, 3],
+ [0, 2],
+ 3,
+ ],
+ ],
+ b"len": 7,
+ }
+ expected_response_with_min_match_len_equals_four_and_with_match_len = {
+ b"matches": [
+ [
+ [4, 7],
+ [5, 8],
+ 4,
+ ],
+ ],
+ b"len": 7,
+ }
+ expected_response_with_nonexistent_key = {
+ b"matches": [],
+ b"len": 0,
+ }
+ assert glide_sync_client.mset({key1: value1, key2: value2}) == OK
+ assert (
+ glide_sync_client.lcs_idx(key1, key2)
+ == expected_response_no_min_match_len_no_with_match_len
+ )
+ assert (
+ glide_sync_client.lcs_idx(key1, key2, min_match_len=4)
+ == expected_response_with_min_match_len_equals_four_no_with_match_len
+ )
+ assert (
+ # negative min_match_len should have no affect on the output
+ glide_sync_client.lcs_idx(key1, key2, min_match_len=-3)
+ == expected_response_no_min_match_len_no_with_match_len
+ )
+ assert (
+ glide_sync_client.lcs_idx(key1, key2, with_match_len=True)
+ == expected_response_no_min_match_len_with_match_len
+ )
+ assert (
+ glide_sync_client.lcs_idx(key1, key2, min_match_len=4, with_match_len=True)
+ == expected_response_with_min_match_len_equals_four_and_with_match_len
+ )
+ assert (
+ glide_sync_client.lcs_idx(key1, nonexistent_key)
+ == expected_response_with_nonexistent_key
+ )
+ # LCS is only defined for string values
+ lcs_non_string_key = "lcs_non_string_key"
+ assert glide_sync_client.sadd(lcs_non_string_key, ["Hello", "world"]) == 2
+ with pytest.raises(RequestError):
+ glide_sync_client.lcs_idx(key1, lcs_non_string_key)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_watch(self, glide_sync_client: GlideClient):
+ """WATCH aborts a transaction (exec returns None) if the key changed before EXEC."""
+ # watched key didn't change outside of transaction before transaction execution, transaction will execute
+ assert glide_sync_client.set("key1", "original_value") == OK
+ assert glide_sync_client.watch(["key1"]) == OK
+ transaction = Transaction()
+ transaction.set("key1", "transaction_value")
+ transaction.get("key1")
+ assert glide_sync_client.exec(transaction) is not None
+
+ # watched key changed outside of transaction before transaction execution, transaction will not execute
+ assert glide_sync_client.set("key1", "original_value") == OK
+ assert glide_sync_client.watch(["key1"]) == OK
+ transaction = Transaction()
+ transaction.set("key1", "transaction_value")
+ # this out-of-band write invalidates the watch
+ assert glide_sync_client.set("key1", "standalone_value") == OK
+ transaction.get("key1")
+ assert glide_sync_client.exec(transaction) is None
+
+ # empty list not supported
+ with pytest.raises(RequestError):
+ glide_sync_client.watch([])
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_unwatch(self, glide_sync_client: GlideClient):
+ """UNWATCH clears watches, so a later EXEC succeeds even after external writes."""
+ # watched key unwatched before transaction execution even if changed
+ # outside of transaction, transaction will still execute
+ assert glide_sync_client.set("key1", "original_value") == OK
+ assert glide_sync_client.watch(["key1"]) == OK
+ transaction = Transaction()
+ transaction.set("key1", "transaction_value")
+ assert glide_sync_client.set("key1", "standalone_value") == OK
+ transaction.get("key1")
+ assert glide_sync_client.unwatch() == OK
+ result = glide_sync_client.exec(transaction)
+ assert result is not None
+ assert isinstance(result, list)
+ # one result per queued command: SET then GET
+ assert len(result) == 2
+ assert result[0] == "OK"
+ assert result[1] == b"transaction_value"
+
+ # UNWATCH returns OK when there no watched keys
+ assert glide_sync_client.unwatch() == OK
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_unwatch_with_route(self, glide_sync_client: GlideClusterClient):
+ assert glide_sync_client.unwatch(RandomNode()) == OK
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_lpos(self, glide_sync_client: GlideSync):
+ """LPOS: rank, count, and max_len arguments plus every documented error case."""
+ min_version = "6.0.6"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ # TODO: change it to pytest fixture after we'll implement a sync client
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+ key = f"{{key}}-1{get_random_string(5)}"
+ non_list_key = f"{{key}}-2{get_random_string(5)}"
+ mylist: List[TEncodable] = ["a", "a", "b", "c", "a", "b"]
+
+ # basic case: first occurrence of "b" is at index 2
+ glide_sync_client.rpush(key, mylist)
+ assert glide_sync_client.lpos(key, "b") == 2
+
+ # reverse traversal (negative rank searches from the tail)
+ assert glide_sync_client.lpos(key, "b", -2) == 2
+
+ # unlimited comparisons (max_len=0 means scan the whole list)
+ assert glide_sync_client.lpos(key, "a", 1, None, 0) == 0
+
+ # limited comparisons: "c" is beyond the first 2 elements
+ assert glide_sync_client.lpos(key, "c", 1, None, 2) is None
+
+ # element does not exist
+ assert glide_sync_client.lpos(key, "non_existing") is None
+
+ # with count (count=0 returns all occurrences)
+ assert glide_sync_client.lpos(key, "a", 1, 0, 0) == [0, 1, 4]
+
+ # with count and rank (negative rank: occurrences reported tail-first)
+ assert glide_sync_client.lpos(key, "a", -2, 0, 0) == [1, 0]
+
+ # key does not exist
+ assert glide_sync_client.lpos("non_existing", "non_existing") is None
+
+ # invalid rank value (rank 0 is not allowed)
+ with pytest.raises(RequestError):
+ glide_sync_client.lpos(key, "a", 0)
+
+ # invalid count
+ with pytest.raises(RequestError):
+ glide_sync_client.lpos(non_list_key, "a", None, -1)
+
+ # invalid max_len
+ with pytest.raises(RequestError):
+ glide_sync_client.lpos(non_list_key, "a", None, None, -1)
+
+ # wrong data type
+ glide_sync_client.set(non_list_key, "non_list_value")
+ with pytest.raises(RequestError):
+ glide_sync_client.lpos(non_list_key, "a")
+
+
+class TestMultiKeyCommandCrossSlot:
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_multi_key_command_returns_cross_slot_error(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ promises: List[Any] = [
+ glide_sync_client.blpop(["abc", "zxy", "lkn"], 0.1),
+ glide_sync_client.brpop(["abc", "zxy", "lkn"], 0.1),
+ glide_sync_client.rename("abc", "zxy"),
+ glide_sync_client.zdiffstore("abc", ["zxy", "lkn"]),
+ glide_sync_client.zdiff(["abc", "zxy", "lkn"]),
+ glide_sync_client.zdiff_withscores(["abc", "zxy", "lkn"]),
+ glide_sync_client.zrangestore("abc", "zxy", RangeByIndex(0, -1)),
+ glide_sync_client.zinterstore(
+ "{xyz}", cast(Union[List[Union[TEncodable]]], ["{abc}", "{def}"])
+ ),
+ glide_sync_client.zunionstore(
+ "{xyz}", cast(Union[List[Union[TEncodable]]], ["{abc}", "{def}"])
+ ),
+ glide_sync_client.bzpopmin(["abc", "zxy", "lkn"], 0.5),
+ glide_sync_client.bzpopmax(["abc", "zxy", "lkn"], 0.5),
+ glide_sync_client.smove("abc", "def", "_"),
+ glide_sync_client.sunionstore("abc", ["zxy", "lkn"]),
+ glide_sync_client.sinter(["abc", "zxy", "lkn"]),
+ glide_sync_client.sinterstore("abc", ["zxy", "lkn"]),
+ glide_sync_client.sdiff(["abc", "zxy", "lkn"]),
+ glide_sync_client.sdiffstore("abc", ["def", "ghi"]),
+ glide_sync_client.renamenx("abc", "def"),
+ glide_sync_client.pfcount(["def", "ghi"]),
+ glide_sync_client.pfmerge("abc", ["def", "ghi"]),
+ glide_sync_client.zinter(["def", "ghi"]),
+ glide_sync_client.zinter_withscores(
+ cast(Union[List[TEncodable]], ["def", "ghi"])
+ ),
+ glide_sync_client.zunion(["def", "ghi"]),
+ glide_sync_client.zunion_withscores(cast(List[TEncodable], ["def", "ghi"])),
+ glide_sync_client.sort_store("abc", "zxy"),
+ glide_sync_client.lmove("abc", "zxy", ListDirection.LEFT, ListDirection.LEFT),
+ glide_sync_client.blmove(
+ "abc", "zxy", ListDirection.LEFT, ListDirection.LEFT, 1
+ ),
+ glide_sync_client.msetnx({"abc": "abc", "zxy": "zyx"}),
+ glide_sync_client.sunion(["def", "ghi"]),
+ glide_sync_client.bitop(BitwiseOperation.OR, "abc", ["zxy", "lkn"]),
+ glide_sync_client.xread({"abc": "0-0", "zxy": "0-0"}),
+ ]
+
+ if not check_if_server_version_lt(glide_sync_client, "6.2.0"):
+ promises.extend(
+ [
+ glide_sync_client.geosearchstore(
+ "abc",
+ "zxy",
+ GeospatialData(15, 37),
+ GeoSearchByBox(400, 400, GeoUnit.KILOMETERS),
+ ),
+ glide_sync_client.copy("abc", "zxy", replace=True),
+ ]
+ )
+
+ if not check_if_server_version_lt(glide_sync_client, "7.0.0"):
+ promises.extend(
+ [
+ glide_sync_client.bzmpop(["abc", "zxy", "lkn"], ScoreFilter.MAX, 0.1),
+ glide_sync_client.zintercard(["abc", "def"]),
+ glide_sync_client.zmpop(["abc", "zxy", "lkn"], ScoreFilter.MAX),
+ glide_sync_client.sintercard(["def", "ghi"]),
+ glide_sync_client.lmpop(["def", "ghi"], ListDirection.LEFT),
+ glide_sync_client.blmpop(["def", "ghi"], ListDirection.LEFT, 1),
+ glide_sync_client.lcs("abc", "def"),
+ glide_sync_client.lcs_len("abc", "def"),
+ glide_sync_client.lcs_idx("abc", "def"),
+ glide_sync_client.fcall("func", ["abc", "zxy", "lkn"], []),
+ glide_sync_client.fcall_ro("func", ["abc", "zxy", "lkn"], []),
+ ]
+ )
+
+ for promise in promises:
+ with pytest.raises(RequestError) as e:
+ promise
+ assert "crossslot" in str(e).lower()
+
+ # TODO bz*, zunion, sdiff and others - all rest multi-key commands except ones tested below
+ pass
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_multi_key_command_routed_to_multiple_nodes(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ glide_sync_client.exists(["abc", "zxy", "lkn"])
+ glide_sync_client.unlink(["abc", "zxy", "lkn"])
+ glide_sync_client.delete(["abc", "zxy", "lkn"])
+ glide_sync_client.mget(["abc", "zxy", "lkn"])
+ glide_sync_client.mset({"abc": "1", "zxy": "2", "lkn": "3"})
+ glide_sync_client.touch(["abc", "zxy", "lkn"])
+ glide_sync_client.watch(["abc", "zxy", "lkn"])
+
+
+class TestCommandsUnitTests:
+ def test_sync_expiry_cmd_args(self):
+ exp_sec = ExpirySet(ExpiryType.SEC, 5)
+ assert exp_sec.get_cmd_args() == ["EX", "5"]
+
+ exp_sec_timedelta = ExpirySet(ExpiryType.SEC, timedelta(seconds=5))
+ assert exp_sec_timedelta.get_cmd_args() == ["EX", "5"]
+
+ exp_millsec = ExpirySet(ExpiryType.MILLSEC, 5)
+ assert exp_millsec.get_cmd_args() == ["PX", "5"]
+
+ exp_millsec_timedelta = ExpirySet(ExpiryType.MILLSEC, timedelta(seconds=5))
+ assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"]
+
+ exp_millsec_timedelta = ExpirySet(ExpiryType.MILLSEC, timedelta(seconds=5))
+ assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"]
+
+ exp_unix_sec = ExpirySet(ExpiryType.UNIX_SEC, 1682575739)
+ assert exp_unix_sec.get_cmd_args() == ["EXAT", "1682575739"]
+
+ exp_unix_sec_datetime = ExpirySet(
+ ExpiryType.UNIX_SEC,
+ datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc),
+ )
+ assert exp_unix_sec_datetime.get_cmd_args() == ["EXAT", "1682639759"]
+
+ exp_unix_millisec = ExpirySet(ExpiryType.UNIX_MILLSEC, 1682586559964)
+ assert exp_unix_millisec.get_cmd_args() == ["PXAT", "1682586559964"]
+
+ exp_unix_millisec_datetime = ExpirySet(
+ ExpiryType.UNIX_MILLSEC,
+ datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc),
+ )
+ assert exp_unix_millisec_datetime.get_cmd_args() == ["PXAT", "1682639759342"]
+
+ def test_sync_get_expiry_cmd_args(self):
+ exp_sec = ExpiryGetEx(ExpiryTypeGetEx.SEC, 5)
+ assert exp_sec.get_cmd_args() == ["EX", "5"]
+
+ exp_sec_timedelta = ExpiryGetEx(ExpiryTypeGetEx.SEC, timedelta(seconds=5))
+ assert exp_sec_timedelta.get_cmd_args() == ["EX", "5"]
+
+ exp_millsec = ExpiryGetEx(ExpiryTypeGetEx.MILLSEC, 5)
+ assert exp_millsec.get_cmd_args() == ["PX", "5"]
+
+ exp_millsec_timedelta = ExpiryGetEx(
+ ExpiryTypeGetEx.MILLSEC, timedelta(seconds=5)
+ )
+ assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"]
+
+ exp_millsec_timedelta = ExpiryGetEx(
+ ExpiryTypeGetEx.MILLSEC, timedelta(seconds=5)
+ )
+ assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"]
+
+ exp_unix_sec = ExpiryGetEx(ExpiryTypeGetEx.UNIX_SEC, 1682575739)
+ assert exp_unix_sec.get_cmd_args() == ["EXAT", "1682575739"]
+
+ exp_unix_sec_datetime = ExpiryGetEx(
+ ExpiryTypeGetEx.UNIX_SEC,
+ datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc),
+ )
+ assert exp_unix_sec_datetime.get_cmd_args() == ["EXAT", "1682639759"]
+
+ exp_unix_millisec = ExpiryGetEx(ExpiryTypeGetEx.UNIX_MILLSEC, 1682586559964)
+ assert exp_unix_millisec.get_cmd_args() == ["PXAT", "1682586559964"]
+
+ exp_unix_millisec_datetime = ExpiryGetEx(
+ ExpiryTypeGetEx.UNIX_MILLSEC,
+ datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc),
+ )
+ assert exp_unix_millisec_datetime.get_cmd_args() == ["PXAT", "1682639759342"]
+
+ exp_persist = ExpiryGetEx(
+ ExpiryTypeGetEx.PERSIST,
+ None,
+ )
+ assert exp_persist.get_cmd_args() == ["PERSIST"]
+
+ def test_sync_expiry_raises_on_value_error(self):
+ with pytest.raises(ValueError):
+ ExpirySet(ExpiryType.SEC, 5.5)
+
+ def test_sync_is_single_response(self):
+ assert is_single_response("This is a string value", "")
+ assert is_single_response(["value", "value"], [""])
+ assert not is_single_response(
+ [["value", ["value"]], ["value", ["valued"]]], [""]
+ )
+ assert is_single_response(None, None)
+
+
+@pytest.mark.asyncio
+class TestClusterRoutes:
+ def cluster_route_custom_command_multi_nodes(
+ self,
+ glide_sync_client: GlideClusterClient,
+ route: Route,
+ ):
+ """Route INFO REPLICATION with a multi-node route and verify the per-node
+ result count and primary/replica split against CLUSTER NODES output.
+
+ Args:
+ route: a multi-node route; AllNodes includes replicas, otherwise
+ only primaries are expected to respond.
+ """
+ cluster_nodes = glide_sync_client.custom_command(["CLUSTER", "NODES"])
+ assert isinstance(cluster_nodes, bytes)
+ cluster_nodes = cluster_nodes.decode()
+ assert isinstance(cluster_nodes, (str, list))
+ cluster_nodes = get_first_result(cluster_nodes)
+ # CLUSTER NODES prints one line per node
+ num_of_nodes = len(cluster_nodes.splitlines())
+ assert isinstance(cluster_nodes, (str, list))
+ # AllNodes hits every node; other multi-node routes skip the replicas
+ expected_num_of_results = (
+ num_of_nodes
+ if isinstance(route, AllNodes)
+ else num_of_nodes - cluster_nodes.count("slave")
+ )
+ expected_primary_count = cluster_nodes.count("master")
+ expected_replica_count = (
+ cluster_nodes.count("slave") if isinstance(route, AllNodes) else 0
+ )
+
+ # multi-node routes return a dict keyed by node
+ all_results = glide_sync_client.custom_command(["INFO", "REPLICATION"], route)
+ assert isinstance(all_results, dict)
+ assert len(all_results) == expected_num_of_results
+ primary_count = 0
+ replica_count = 0
+ for _, info_res in all_results.items():
+ assert isinstance(info_res, bytes)
+ info_res = info_res.decode()
+ assert "role:master" in info_res or "role:slave" in info_res
+ if "role:master" in info_res:
+ primary_count += 1
+ else:
+ replica_count += 1
+ assert primary_count == expected_primary_count
+ assert replica_count == expected_replica_count
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_custom_command_all_nodes(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ self.cluster_route_custom_command_multi_nodes(glide_sync_client, AllNodes())
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_custom_command_all_primaries(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ self.cluster_route_custom_command_multi_nodes(
+ glide_sync_client, AllPrimaries()
+ )
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_custom_command_random_node(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ info_res = glide_sync_client.custom_command(
+ ["INFO", "REPLICATION"], RandomNode()
+ )
+ assert isinstance(info_res, bytes)
+ info_res = info_res.decode()
+ assert type(info_res) is str
+ assert "role:master" in info_res or "role:slave" in info_res
+
+ def cluster_route_custom_command_slot_route(
+ self, glide_sync_client: GlideClusterClient, is_slot_key: bool
+ ):
+ route_class = SlotKeyRoute if is_slot_key else SlotIdRoute
+ route_second_arg = "foo" if is_slot_key else 4000
+ primary_res = glide_sync_client.custom_command(
+ ["CLUSTER", "NODES"], route_class(SlotType.PRIMARY, route_second_arg) # type: ignore
+ )
+ assert isinstance(primary_res, bytes)
+ primary_res = primary_res.decode()
+
+ assert type(primary_res) is str
+ assert "myself,master" in primary_res
+ expected_primary_node_id = ""
+ for node_line in primary_res.splitlines():
+ if "myself" in node_line:
+ expected_primary_node_id = node_line.split(" ")[0]
+
+ replica_res = glide_sync_client.custom_command(
+ ["CLUSTER", "NODES"], route_class(SlotType.REPLICA, route_second_arg) # type: ignore
+ )
+ assert isinstance(replica_res, bytes)
+ replica_res = replica_res.decode()
+
+ assert isinstance(replica_res, str)
+ assert "myself,slave" in replica_res
+ for node_line in replica_res:
+ if "myself" in node_line:
+ primary_node_id = node_line.split(" ")[3]
+ assert primary_node_id == expected_primary_node_id
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_custom_command_slot_key_route(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ self.cluster_route_custom_command_slot_route(glide_sync_client, True)
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_custom_command_slot_id_route(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ self.cluster_route_custom_command_slot_route(glide_sync_client, False)
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_info_random_route(self, glide_sync_client: GlideClusterClient):
+ info = glide_sync_client.info([InfoSection.SERVER], RandomNode())
+ assert b"# Server" in info
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_route_by_address_reaches_correct_node(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ """ByAddressRoute ("host:port" or host+port) must reach the named node.
+
+ The node replying to CLUSTER NODES marks its own line with "myself"; routing
+ by that node's address must produce the same "myself" line again.
+ """
+ # returns the line that contains the word "myself", up to that point. This is done because the values after it might change with time.
+ def clean_result(value: TResult):
+ assert type(value) is str
+ for line in value.splitlines():
+ if "myself" in line:
+ return line.split("myself")[0]
+ raise Exception(
+ f"Couldn't find 'myself' in the cluster nodes output: {value}"
+ )
+
+ cluster_nodes = glide_sync_client.custom_command(
+ ["cluster", "nodes"], RandomNode()
+ )
+ assert isinstance(cluster_nodes, bytes)
+ cluster_nodes = clean_result(cluster_nodes.decode())
+
+ assert isinstance(cluster_nodes, str)
+ # field 1 is "ip:port@cport"; drop the cluster-bus "@cport" suffix
+ host = cluster_nodes.split(" ")[1].split("@")[0]
+
+ # route by combined "host:port" string
+ second_result = glide_sync_client.custom_command(
+ ["cluster", "nodes"], ByAddressRoute(host)
+ )
+ assert isinstance(second_result, bytes)
+ second_result = clean_result(second_result.decode())
+
+ assert cluster_nodes == second_result
+
+ host, port = host.split(":")
+ port_as_int = int(port)
+
+ # route by separate host and port arguments
+ third_result = glide_sync_client.custom_command(
+ ["cluster", "nodes"], ByAddressRoute(host, port_as_int)
+ )
+ assert isinstance(third_result, bytes)
+ third_result = clean_result(third_result.decode())
+
+ assert cluster_nodes == third_result
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_fail_routing_by_address_if_no_port_is_provided(
+ self, glide_sync_client: GlideClusterClient
+ ):
+ with pytest.raises(RequestError):
+ glide_sync_client.info(route=ByAddressRoute("foo"))
+
+ @pytest.mark.parametrize("cluster_mode", [True])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_cluster_flushdb(self, glide_sync_client: GlideClusterClient):
+ """FLUSHDB routed to all primaries: default, ASYNC, and (>=6.2) SYNC modes."""
+ min_version = "6.2.0"
+ key = f"{{key}}-1{get_random_string(5)}"
+ value = get_random_string(5)
+
+ # default flush mode
+ glide_sync_client.set(key, value)
+ assert glide_sync_client.dbsize() > 0
+ assert glide_sync_client.flushdb(route=AllPrimaries()) == OK
+ assert glide_sync_client.dbsize() == 0
+
+ # ASYNC flush
+ glide_sync_client.set(key, value)
+ assert glide_sync_client.dbsize() > 0
+ assert glide_sync_client.flushdb(FlushMode.ASYNC, AllPrimaries()) == OK
+ assert glide_sync_client.dbsize() == 0
+
+ # SYNC flush requires server >= 6.2.0
+ if not check_if_server_version_lt(glide_sync_client, min_version):
+ glide_sync_client.set(key, value)
+ assert glide_sync_client.dbsize() > 0
+ assert glide_sync_client.flushdb(FlushMode.SYNC, AllPrimaries()) == OK
+ assert glide_sync_client.dbsize() == 0
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_sscan(self, glide_sync_client: GlideClusterClient):
+ key1 = f"{{key}}-1{get_random_string(5)}"
+ key2 = f"{{key}}-2{get_random_string(5)}"
+ initial_cursor = "0"
+ result_cursor_index = 0
+ result_collection_index = 1
+ default_count = 10
+ num_members: List[TEncodable] = list(
+ map(str, range(50000))
+ ) # Use large dataset to force an iterative cursor.
+ char_members: List[TEncodable] = ["a", "b", "c", "d", "e"]
+
+ # Empty set
+ result = glide_sync_client.sscan(key1, initial_cursor)
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+
+ # Negative cursor
+ if check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ result = glide_sync_client.sscan(key1, "-1")
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+ else:
+ with pytest.raises(RequestError):
+ glide_sync_client.sscan(key2, "-1")
+
+ # Result contains the whole set
+ assert glide_sync_client.sadd(key1, char_members) == len(char_members)
+ result = glide_sync_client.sscan(key1, initial_cursor)
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert len(result[result_collection_index]) == len(char_members)
+ assert set(result[result_collection_index]).issubset(
+ cast(list, convert_string_to_bytes_object(char_members))
+ )
+
+ result = glide_sync_client.sscan(key1, initial_cursor, match="a")
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert set(result[result_collection_index]).issubset(set([b"a"]))
+
+ # Result contains a subset of the key
+ assert glide_sync_client.sadd(key1, num_members) == len(num_members)
+ result_cursor = "0"
+ result_values = set() # type: set[bytes]
+ result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.sscan(key1, result_cursor)
+ ),
+ )
+ result_cursor = str(result[result_cursor_index])
+ result_values.update(result[result_collection_index]) # type: ignore
+
+ # 0 is returned for the cursor of the last iteration.
+ while result_cursor != "0":
+ next_result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.sscan(key1, result_cursor)
+ ),
+ )
+ next_result_cursor = str(next_result[result_cursor_index])
+ assert next_result_cursor != result_cursor
+
+ assert not set(result[result_collection_index]).issubset(
+ set(next_result[result_collection_index])
+ )
+ result_values.update(next_result[result_collection_index])
+ result = next_result
+ result_cursor = next_result_cursor
+ assert set(num_members).issubset(result_values)
+ assert set(char_members).issubset(result_values)
+
+ # Test match pattern
+ result = glide_sync_client.sscan(key1, initial_cursor, match="*")
+ assert result[result_cursor_index] != "0"
+ assert len(result[result_collection_index]) >= default_count
+
+ # Test count
+ result = glide_sync_client.sscan(key1, initial_cursor, count=20)
+ assert result[result_cursor_index] != "0"
+ assert len(result[result_collection_index]) >= 20
+
+ # Test count with match returns a non-empty list
+ result = glide_sync_client.sscan(key1, initial_cursor, match="1*", count=20)
+ assert result[result_cursor_index] != "0"
+ assert len(result[result_collection_index]) >= 0
+
+ # Exceptions
+ # Non-set key
+ assert glide_sync_client.set(key2, "test") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.sscan(key2, initial_cursor)
+ with pytest.raises(RequestError):
+ glide_sync_client.sscan(key2, initial_cursor, match="test", count=20)
+
+ # Negative count
+ with pytest.raises(RequestError):
+ glide_sync_client.sscan(key2, initial_cursor, count=-1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_zscan(self, glide_sync_client: GlideClusterClient):
+ key1 = f"{{key}}-1{get_random_string(5)}"
+ key2 = f"{{key}}-2{get_random_string(5)}"
+ initial_cursor = "0"
+ result_cursor_index = 0
+ result_collection_index = 1
+ default_count = 20
+ num_map: Dict[TEncodable, float] = {}
+ num_map_with_str_scores = {}
+ for i in range(50000): # Use large dataset to force an iterative cursor.
+ num_map.update({"value " + str(i): i})
+ num_map_with_str_scores.update({"value " + str(i): str(i)})
+ char_map: Mapping[TEncodable, float] = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4}
+ char_map_with_str_scores = {
+ "a": "0",
+ "b": "1",
+ "c": "2",
+ "d": "3",
+ "e": "4",
+ }
+
+ convert_list_to_dict = lambda list: {
+ list[i]: list[i + 1] for i in range(0, len(list), 2)
+ }
+
+ # Empty set
+ result = glide_sync_client.zscan(key1, initial_cursor)
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+
+ # Negative cursor
+ if check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ result = glide_sync_client.zscan(key1, "-1")
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+ else:
+ with pytest.raises(RequestError):
+ glide_sync_client.zscan(key2, "-1")
+
+ # Result contains the whole set
+ assert glide_sync_client.zadd(key1, char_map) == len(char_map)
+ result = glide_sync_client.zscan(key1, initial_cursor)
+ result_collection = result[result_collection_index]
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert len(result_collection) == len(char_map) * 2
+ assert convert_list_to_dict(result_collection) == cast(
+ list, convert_string_to_bytes_object(char_map_with_str_scores)
+ )
+
+ result = glide_sync_client.zscan(key1, initial_cursor, match="a")
+ result_collection = result[result_collection_index]
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert convert_list_to_dict(result_collection) == {b"a": b"0"}
+
+ # Result contains a subset of the key
+ assert glide_sync_client.zadd(key1, num_map) == len(num_map)
+ full_result_map = {}
+ result = result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.zscan(key1, initial_cursor)
+ ),
+ )
+ result_cursor = str(result[result_cursor_index])
+ result_iteration_collection: Dict[str, str] = convert_list_to_dict(
+ result[result_collection_index]
+ )
+ full_result_map.update(result_iteration_collection)
+
+ # 0 is returned for the cursor of the last iteration.
+ while result_cursor != "0":
+ next_result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.zscan(key1, result_cursor)
+ ),
+ )
+ next_result_cursor = next_result[result_cursor_index]
+ assert next_result_cursor != result_cursor
+
+ next_result_collection = convert_list_to_dict(
+ next_result[result_collection_index]
+ )
+ assert result_iteration_collection != next_result_collection
+
+ full_result_map.update(next_result_collection)
+ result_iteration_collection = next_result_collection
+ result_cursor = next_result_cursor
+ num_map_with_str_scores.update(char_map_with_str_scores)
+ assert num_map_with_str_scores == full_result_map
+
+ # Test match pattern
+ result = glide_sync_client.zscan(key1, initial_cursor, match="*")
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= default_count
+
+ # Test count
+ result = glide_sync_client.zscan(key1, initial_cursor, count=20)
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= 20
+
+ # Test count with match returns a non-empty list
+ result = glide_sync_client.zscan(key1, initial_cursor, match="1*", count=20)
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= 0
+
+ # Test no_scores option
+ if not check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ result = glide_sync_client.zscan(key1, initial_cursor, no_scores=True)
+ assert result[result_cursor_index] != b"0"
+ values_array = cast(List[bytes], result[result_collection_index])
+ # Verify that scores are not included
+ assert all(
+ item.startswith(b"value") and item.isascii() for item in values_array
+ )
+
+ # Exceptions
+ # Non-set key
+ assert glide_sync_client.set(key2, "test") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.zscan(key2, initial_cursor)
+ with pytest.raises(RequestError):
+ glide_sync_client.zscan(key2, initial_cursor, match="test", count=20)
+
+ # Negative count
+ with pytest.raises(RequestError):
+ glide_sync_client.zscan(key2, initial_cursor, count=-1)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_hscan(self, glide_sync_client: GlideClusterClient):
+ key1 = f"{{key}}-1{get_random_string(5)}"
+ key2 = f"{{key}}-2{get_random_string(5)}"
+ initial_cursor = "0"
+ result_cursor_index = 0
+ result_collection_index = 1
+ default_count = 20
+ num_map: dict[TEncodable, TEncodable] = {}
+ for i in range(50000): # Use large dataset to force an iterative cursor.
+ num_map.update({"field " + str(i): "value " + str(i)})
+ char_map: Dict[TEncodable, TEncodable] = {
+ "field a": "value a",
+ "field b": "value b",
+ "field c": "value c",
+ "field d": "value d",
+ "field e": "value e",
+ }
+
+ convert_list_to_dict = lambda list: {
+ list[i]: list[i + 1] for i in range(0, len(list), 2)
+ }
+
+ # Empty set
+ result = glide_sync_client.hscan(key1, initial_cursor)
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+
+ # Negative cursor
+ if check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ result = glide_sync_client.hscan(key1, "-1")
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert result[result_collection_index] == []
+ else:
+ with pytest.raises(RequestError):
+ glide_sync_client.hscan(key2, "-1")
+
+ # Result contains the whole set
+ assert glide_sync_client.hset(key1, char_map) == len(char_map)
+ result = glide_sync_client.hscan(key1, initial_cursor)
+ result_collection = result[result_collection_index]
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert len(result_collection) == len(char_map) * 2
+ assert convert_list_to_dict(result_collection) == cast(
+ dict, convert_string_to_bytes_object(char_map) # type: ignore
+ )
+
+ result = glide_sync_client.hscan(key1, initial_cursor, match="field a")
+ result_collection = result[result_collection_index]
+ assert result[result_cursor_index] == initial_cursor.encode()
+ assert convert_list_to_dict(result_collection) == {b"field a": b"value a"}
+
+ # Result contains a subset of the key
+ assert glide_sync_client.hset(key1, num_map) == len(num_map)
+ full_result_map = {}
+ result = result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.hscan(key1, initial_cursor)
+ ),
+ )
+ result_cursor = str(result[result_cursor_index])
+ result_iteration_collection: Dict[str, str] = convert_list_to_dict(
+ result[result_collection_index]
+ )
+ full_result_map.update(result_iteration_collection)
+
+ # 0 is returned for the cursor of the last iteration.
+ while result_cursor != "0":
+ next_result = cast(
+ list,
+ convert_bytes_to_string_object(
+ glide_sync_client.hscan(key1, result_cursor)
+ ),
+ )
+ next_result_cursor = next_result[result_cursor_index]
+ assert next_result_cursor != result_cursor
+
+ next_result_collection = convert_list_to_dict(
+ next_result[result_collection_index]
+ )
+ assert result_iteration_collection != next_result_collection
+
+ full_result_map.update(next_result_collection)
+ result_iteration_collection = next_result_collection
+ result_cursor = next_result_cursor
+ num_map.update(char_map)
+ assert num_map == full_result_map
+
+ # Test match pattern
+ result = glide_sync_client.hscan(key1, initial_cursor, match="*")
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= default_count
+
+ # Test count
+ result = glide_sync_client.hscan(key1, initial_cursor, count=20)
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= 20
+
+ # Test count with match returns a non-empty list
+ result = glide_sync_client.hscan(key1, initial_cursor, match="1*", count=20)
+ assert result[result_cursor_index] != b"0"
+ assert len(result[result_collection_index]) >= 0
+
+ # Test no_values option
+ if not check_if_server_version_lt(glide_sync_client, "8.0.0"):
+ result = glide_sync_client.hscan(key1, initial_cursor, no_values=True)
+ assert result[result_cursor_index] != b"0"
+ values_array = cast(List[bytes], result[result_collection_index])
+ # Verify that values are not included
+ assert all(
+ item.startswith(b"field") and item.isascii() for item in values_array
+ )
+
+ # Exceptions
+ # Non-hash key
+ assert glide_sync_client.set(key2, "test") == OK
+ with pytest.raises(RequestError):
+ glide_sync_client.hscan(key2, initial_cursor)
+ with pytest.raises(RequestError):
+ glide_sync_client.hscan(key2, initial_cursor, match="test", count=20)
+
+ # Negative count
+ with pytest.raises(RequestError):
+ glide_sync_client.hscan(key2, initial_cursor, count=-1)
+
+
+def script_kill_tests(
+ glide_sync_client: GlideSync, test_sync_client: TGlideClient, route: Optional[Route] = None
+):
+ """
+ shared tests for SCRIPT KILL used in routed and non-routed variants, clients are created in
+ respective tests with different test matrices.
+ """
+ # Verify that script_kill raises an error when no script is running
+ with pytest.raises(RequestError) as e:
+ glide_sync_client.script_kill()
+ assert "No scripts in execution right now" in str(e)
+
+ # Create a long-running script
+ long_script = Script(create_long_running_lua_script(10))
+
+ def run_long_script():
+ with pytest.raises(RequestError) as e:
+ if route is not None:
+ test_sync_client.invoke_script_route(long_script, route=route)
+ else:
+ test_sync_client.invoke_script(long_script)
+ assert "Script killed by user" in str(e)
+
+ def wait_and_kill_script():
+ asyncio.sleep(3) # Give some time for the script to start
+ timeout = 0
+ while timeout <= 5:
+ # keep trying to kill until we get an "OK"
+ try:
+ if route is not None:
+ result = cast(GlideClusterClient, glide_sync_client).script_kill(
+ route=route
+ )
+ else:
+ result = glide_sync_client.script_kill()
+ # we expect to get success
+ assert result == "OK"
+ break
+ except RequestError:
+ # a RequestError may occur if the script is not yet running
+ # sleep and try again
+ timeout += 0.5
+ asyncio.sleep(0.5)
+
+ # Run the long script and kill it
+ asyncio.gather(
+ run_long_script(),
+ wait_and_kill_script(),
+ )
+
+ # Verify that script_kill raises an error when no script is running
+ with pytest.raises(RequestError) as e:
+ if route is not None:
+ cast(GlideClusterClient, glide_sync_client).script_kill(route=route)
+ else:
+ glide_sync_client.script_kill()
+ assert "No scripts in execution right now" in str(e)
+
+ test_sync_client.close()
+
+
+@pytest.mark.asyncio
+class TestScripts:
+    @pytest.mark.smoke_test
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script(self, glide_sync_client: GlideSync):
+        # Basic EVALSHA flow via Script objects: invoke with no keys, with
+        # KEYS/ARGV, and reuse the same Script with different parameters.
+        key1 = get_random_string(10)
+        key2 = get_random_string(10)
+        script = Script("return 'Hello'")
+        assert glide_sync_client.invoke_script(script) == "Hello".encode()
+
+        script = Script("return redis.call('SET', KEYS[1], ARGV[1])")
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key1], args=["value1"])
+            == "OK"
+        )
+        # Reuse the same script with different parameters.
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key2], args=["value2"])
+            == "OK"
+        )
+        # Read back both keys to confirm the SET script ran against each.
+        script = Script("return redis.call('GET', KEYS[1])")
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key1]) == "value1".encode()
+        )
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key2]) == "value2".encode()
+        )
+
+    @pytest.mark.smoke_test
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_binary(self, glide_sync_client: GlideSync):
+        # Same flow as test_sync_script, but every script body, key, and arg is
+        # passed as bytes to verify binary-safe handling end to end.
+        key1 = bytes(get_random_string(10), "utf-8")
+        key2 = bytes(get_random_string(10), "utf-8")
+        script = Script(bytes("return 'Hello'", "utf-8"))
+        assert glide_sync_client.invoke_script(script) == "Hello".encode()
+
+        script = Script(bytes("return redis.call('SET', KEYS[1], ARGV[1])", "utf-8"))
+        assert (
+            glide_sync_client.invoke_script(
+                script, keys=[key1], args=[bytes("value1", "utf-8")]
+            )
+            == "OK"
+        )
+        # Reuse the same script with different parameters.
+        assert (
+            glide_sync_client.invoke_script(
+                script, keys=[key2], args=[bytes("value2", "utf-8")]
+            )
+            == "OK"
+        )
+        script = Script(bytes("return redis.call('GET', KEYS[1])", "utf-8"))
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key1]) == "value1".encode()
+        )
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key2]) == "value2".encode()
+        )
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_large_keys_no_args(self, request, cluster_mode, protocol):
+        # A key larger than typical buffer sizes must round-trip through
+        # invoke_script unchanged.
+        # NOTE(review): create_client is called without await here — confirm
+        # this module imports a synchronous create_client helper.
+        glide_sync_client = create_client(
+            request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000
+        )
+        length = 2**13  # 8kb
+        key = "0" * length
+        script = Script("return KEYS[1]")
+        assert glide_sync_client.invoke_script(script, keys=[key]) == key.encode()
+        glide_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_large_args_no_keys(self, request, cluster_mode, protocol):
+        # Large ARGV values (no keys) must round-trip unchanged; returning
+        # ARGV[2] also checks argument ordering is preserved.
+        glide_sync_client = create_client(
+            request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000
+        )
+        length = 2**12  # 4kb
+        arg1 = "0" * length
+        arg2 = "1" * length
+
+        script = Script("return ARGV[2]")
+        assert (
+            glide_sync_client.invoke_script(script, args=[arg1, arg2]) == arg2.encode()
+        )
+        glide_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_large_keys_and_args(self, request, cluster_mode, protocol):
+        # Large key and large arg together must still round-trip; the script
+        # returns KEYS[1] to confirm the key survived intact.
+        glide_sync_client = create_client(
+            request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000
+        )
+        length = 2**12  # 4kb
+        key = "0" * length
+        arg = "1" * length
+
+        script = Script("return KEYS[1]")
+        assert (
+            glide_sync_client.invoke_script(script, keys=[key], args=[arg])
+            == key.encode()
+        )
+        glide_sync_client.close()
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_exists(self, glide_sync_client: GlideSync, cluster_mode: bool):
+        # SCRIPT EXISTS for loaded, never-loaded, route-loaded, and bogus SHA1s.
+        # NOTE(review): this deliberately overwrites the parametrized
+        # cluster_mode argument with the actual client type.
+        cluster_mode = isinstance(glide_sync_client, GlideClusterClient)
+        script1 = Script("return 'Hello'")
+        script2 = Script("return 'World'")
+        script3 = Script("return 'Hello World'")
+
+        # Load script1 to all nodes, do not load script2 and load script3 with a SlotKeyRoute
+        glide_sync_client.invoke_script(script1)
+
+        if cluster_mode:
+            cast(GlideClusterClient, glide_sync_client).invoke_script_route(
+                script3, route=SlotKeyRoute(SlotType.PRIMARY, "1")
+            )
+        else:
+            glide_sync_client.invoke_script(script3)
+
+        # Get the SHA1 digests of the scripts
+        sha1_1 = script1.get_hash()
+        sha1_2 = script2.get_hash()
+        sha1_3 = script3.get_hash()
+        non_existent_sha1 = "0" * 40  # A SHA1 that doesn't exist
+        # Check existence of scripts
+        result = glide_sync_client.script_exists(
+            [sha1_1, sha1_2, sha1_3, non_existent_sha1]
+        )
+
+        # script1 is loaded and returns true.
+        # script2 is only cached and not loaded, returns false.
+        # script3 is invoked with a SlotKeyRoute. Despite SCRIPT EXIST uses LogicalAggregate AND on the results,
+        # SCRIPT LOAD during internal execution so the script still gets loaded on all nodes, returns true.
+        # non-existing sha1 returns false.
+        assert result == [True, False, True, False]
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_flush(self, glide_sync_client: GlideSync):
+        # SCRIPT FLUSH must empty the server-side script cache, in both the
+        # default and ASYNC flush modes.
+
+        # Load a script
+        script = Script("return 'Hello'")
+        glide_sync_client.invoke_script(script)
+
+        # Check that the script exists
+        assert glide_sync_client.script_exists([script.get_hash()]) == [True]
+
+        # Flush the script cache
+        assert glide_sync_client.script_flush() == OK
+
+        # Check that the script no longer exists
+        assert glide_sync_client.script_exists([script.get_hash()]) == [False]
+
+        # Test with ASYNC mode
+        glide_sync_client.invoke_script(script)
+        assert glide_sync_client.script_flush(FlushMode.ASYNC) == OK
+        assert glide_sync_client.script_exists([script.get_hash()]) == [False]
+
+    @pytest.mark.parametrize("cluster_mode", [True])
+    @pytest.mark.parametrize("single_route", [True])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_kill_route(
+        self,
+        request,
+        cluster_mode,
+        protocol,
+        glide_sync_client: GlideSync,
+        single_route: bool,
+    ):
+        # Routed variant of the shared SCRIPT KILL scenario (script_kill_tests).
+        route = SlotKeyRoute(SlotType.PRIMARY, "1") if single_route else AllPrimaries()
+
+        # Create a second client to run the script
+        # NOTE(review): create_client is called without await here — confirm
+        # this module imports a synchronous create_client helper.
+        test_sync_client = create_client(
+            request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000
+        )
+
+        # script_kill_tests closes test_sync_client when it finishes.
+        script_kill_tests(glide_sync_client, test_sync_client, route)
+
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+    def test_sync_script_kill_no_route(
+        self,
+        request,
+        cluster_mode,
+        protocol,
+        glide_sync_client: GlideSync,
+    ):
+        # Non-routed variant of the shared SCRIPT KILL scenario (script_kill_tests).
+
+        # Create a second client to run the script
+        test_sync_client = create_client(
+            request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000
+        )
+
+        # script_kill_tests closes test_sync_client when it finishes.
+        script_kill_tests(glide_sync_client, test_sync_client)
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_script_kill_unkillable(
+ self, request, cluster_mode, protocol, glide_sync_client: GlideSync
+ ):
+ # Create a second client to run the script
+ test_sync_client = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000
+ )
+
+ # Create a second client to kill the script
+ test_sync_client2 = create_client(
+ request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000
+ )
+
+ # Add test for script_kill with writing script
+ writing_script = Script(
+ """
+ redis.call('SET', KEYS[1], 'value')
+ local start = redis.call('TIME')[1]
+ while redis.call('TIME')[1] - start < 15 do
+ redis.call('SET', KEYS[1], 'value')
+ end
+ """
+ )
+
+ def run_writing_script():
+ test_sync_client.invoke_script(writing_script, keys=[get_random_string(5)])
+
+ def attempt_kill_writing_script():
+ asyncio.sleep(3) # Give some time for the script to start
+ foundUnkillable = False
+ while True:
+ try:
+ test_sync_client2.script_kill()
+ except RequestError as e:
+ if "UNKILLABLE" in str(e):
+ foundUnkillable = True
+ break
+ asyncio.sleep(0.5)
+
+ assert foundUnkillable
+
+ # Run the writing script and attempt to kill it
+ asyncio.gather(
+ run_writing_script(),
+ attempt_kill_writing_script(),
+ )
+
+ test_sync_client.close()
+ test_sync_client2.close()
+
+ @pytest.mark.parametrize("cluster_mode", [True, False])
+ @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3])
+ def test_sync_script_show(self, glide_sync_client: GlideSync):
+ min_version = "8.0.0"
+ if check_if_server_version_lt(glide_sync_client, min_version):
+ return pytest.mark.skip(reason=f"Valkey version required >= {min_version}")
+
+ code = f"return '{get_random_string(5)}'"
+ script = Script(code)
+
+ # Load the scripts
+ glide_sync_client.invoke_script(script)
+
+ # Get the SHA1 digests of the script
+ sha1 = script.get_hash()
+
+ assert glide_sync_client.script_show(sha1) == code.encode()
+
+ with pytest.raises(RequestError):
+ glide_sync_client.script_show("non existing sha1")
diff --git a/python/src/lib.rs b/python/src/lib.rs
index 09914c2c59..756e0415e5 100644
--- a/python/src/lib.rs
+++ b/python/src/lib.rs
@@ -159,10 +159,11 @@ fn glide(_py: Python, m: &Bound) -> PyResult<()> {
fn py_init(level: Option, file_name: Option<&str>) -> Level {
init(level, file_name)
}
+
#[pyfunction]
fn start_socket_listener_external(init_callback: PyObject) -> PyResult {
let init_callback = Arc::new(init_callback);
- start_socket_listener({
+ let socket_path = start_socket_listener({
let init_callback = Arc::clone(&init_callback);
move |socket_path| {
let init_callback = Arc::clone(&init_callback);
@@ -178,7 +179,7 @@ fn glide(_py: Python, m: &Bound) -> PyResult<()> {
});
}
});
- Ok(Python::with_gil(|py| "OK".into_py(py)))
+ Ok(Python::with_gil(|py| socket_path.into_py(py)))
}
fn iter_to_value(