...(raw float32 vector bytes for doc:a elided)...'},\n",
+ " Document {'id': 'doc:b', 'payload': None, 'score': '0.253424823284', 'vector': b'...(raw float32 vector bytes for doc:b elided)...'}"
]
},
"execution_count": 5,
@@ -176,6 +176,7 @@
" Query(\"*=>[KNN 2 @vector $vec as score]\")\n",
" .sort_by(\"score\")\n",
" .return_fields(\"id\", \"score\")\n",
+ " .return_field(\"vector\", decode_field=False) # return the vector field as bytes\n",
" .paging(0, 2)\n",
" .dialect(2)\n",
")\n",
diff --git a/docs/examples/ssl_connection_examples.ipynb b/docs/examples/ssl_connection_examples.ipynb
index a3d015619f..c94c4e0191 100644
--- a/docs/examples/ssl_connection_examples.ipynb
+++ b/docs/examples/ssl_connection_examples.ipynb
@@ -11,12 +11,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Connecting to a Redis instance via SSL."
+ "## Connecting to a Redis instance via SSL"
]
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {},
"outputs": [
{
@@ -25,7 +25,7 @@
"True"
]
},
- "execution_count": 5,
+ "execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
@@ -33,8 +33,13 @@
"source": [
"import redis\n",
"\n",
- "ssl_connection = redis.Redis(host='localhost', port=6666, ssl=True, ssl_cert_reqs=\"none\")\n",
- "ssl_connection.ping()"
+ "r = redis.Redis(\n",
+ " host='localhost', \n",
+ " port=6666, \n",
+ " ssl=True, \n",
+ " ssl_cert_reqs=\"none\",\n",
+ ")\n",
+ "r.ping()"
]
},
{
@@ -48,39 +53,30 @@
"cell_type": "code",
"execution_count": null,
"metadata": {},
- "outputs": [],
- "source": [
- "import redis\n",
- "url_connection = redis.from_url(\"redis://localhost:6379?ssl_cert_reqs=none&decode_responses=True&health_check_interval=2\")\n",
- "url_connection.ping()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "04e70233",
- "metadata": {},
- "source": [
- "## Connecting to a Redis instance using ConnectionPool"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "2903de26",
- "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": null,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"import redis\n",
- "redis_pool = redis.ConnectionPool(host=\"localhost\", port=6666, connection_class=redis.SSLConnection)\n",
- "ssl_connection = redis.StrictRedis(connection_pool=redis_pool) \n",
- "ssl_connection.ping()"
+ "\n",
+ "r = redis.from_url(\"rediss://localhost:6666?ssl_cert_reqs=none&decode_responses=True&health_check_interval=2\")\n",
+ "r.ping()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Connecting to a Redis instance via SSL, while specifying a minimum TLS version"
+ "## Connecting to a Redis instance using a ConnectionPool"
]
},
{
@@ -94,34 +90,35 @@
"True"
]
},
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import redis\n",
- "import ssl\n",
"\n",
- "ssl_conn = redis.Redis(\n",
- " host=\"localhost\",\n",
- " port=6666,\n",
- " ssl=True,\n",
- " ssl_min_version=ssl.TLSVersion.TLSv1_3,\n",
+ "redis_pool = redis.ConnectionPool(\n",
+ " host=\"localhost\", \n",
+ " port=6666, \n",
+ " connection_class=redis.SSLConnection, \n",
+ " ssl_cert_reqs=\"none\",\n",
")\n",
- "ssl_conn.ping()"
+ "\n",
+ "r = redis.StrictRedis(connection_pool=redis_pool) \n",
+ "r.ping()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Connecting to a Redis instance via SSL, while specifying a self-signed SSL certificate."
+ "## Connecting to a Redis instance via SSL, while specifying a minimum TLS version"
]
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
"outputs": [
{
@@ -130,42 +127,30 @@
"True"
]
},
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "import os\n",
"import redis\n",
+ "import ssl\n",
"\n",
- "ssl_certfile=\"some-certificate.pem\"\n",
- "ssl_keyfile=\"some-key.pem\"\n",
- "ssl_ca_certs=ssl_certfile\n",
- "\n",
- "ssl_cert_conn = redis.Redis(\n",
+ "r = redis.Redis(\n",
" host=\"localhost\",\n",
" port=6666,\n",
" ssl=True,\n",
- " ssl_certfile=ssl_certfile,\n",
- " ssl_keyfile=ssl_keyfile,\n",
- " ssl_cert_reqs=\"required\",\n",
- " ssl_ca_certs=ssl_ca_certs,\n",
+ " ssl_min_version=ssl.TLSVersion.TLSv1_3,\n",
+ " ssl_cert_reqs=\"none\",\n",
")\n",
- "ssl_cert_conn.ping()"
+ "r.ping()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Connecting to a Redis instance via SSL, and validate the OCSP status of the certificate\n",
- "\n",
- "The redis package is design to be small, meaning extra libraries must be installed, in order to support OCSP stapling. As a result, first install redis via:\n",
- "\n",
- "*pip install redis[ocsp]*\n",
- "\n",
- "This will install cryptography, requests, and PyOpenSSL, none of which are generally required to use Redis."
+ "## Connecting to a Redis instance via SSL, while specifying a self-signed SSL CA certificate"
]
},
{
@@ -179,48 +164,42 @@
"True"
]
},
+ "execution_count": null,
"metadata": {},
- "output_type": "display_data"
+ "output_type": "execute_result"
}
],
"source": [
"import os\n",
"import redis\n",
"\n",
- "ssl_certfile=\"some-certificate.pem\"\n",
- "ssl_keyfile=\"some-key.pem\"\n",
- "ssl_ca_certs=ssl_certfile\n",
+ "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n",
"\n",
- "ssl_cert_conn = redis.Redis(\n",
+ "r = redis.Redis(\n",
" host=\"localhost\",\n",
" port=6666,\n",
" ssl=True,\n",
- " ssl_certfile=ssl_certfile,\n",
- " ssl_keyfile=ssl_keyfile,\n",
+ " ssl_certfile=os.path.join(pki_dir, \"client-cert.pem\"),\n",
+ " ssl_keyfile=os.path.join(pki_dir, \"client-key.pem\"),\n",
" ssl_cert_reqs=\"required\",\n",
- " ssl_validate_ocsp=True\n",
+ " ssl_ca_certs=os.path.join(pki_dir, \"ca-cert.pem\"),\n",
")\n",
- "ssl_cert_conn.ping()"
+ "r.ping()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Connect via SSL, validate OCSP-stapled certificates\n",
+ "## Connecting to a Redis instance via SSL, and validate the OCSP status of the certificate\n",
"\n",
- "The redis package is design to be small, meaning extra libraries must be installed, in order to support OCSP stapling. As a result, first install redis via:\n",
+ "The redis package is designed to be small, meaning extra libraries must be installed, in order to support OCSP stapling. As a result, first install redis via:\n",
"\n",
- "*pip install redis[ocsp]*\n",
+ "`pip install redis[ocsp]`\n",
"\n",
- "This will install cryptography, requests, and PyOpenSSL, none of which are generally required to use Redis."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Using a custom SSL context and validating against an expected certificate"
+ "This will install cryptography, requests, and PyOpenSSL, none of which are generally required to use Redis.\n",
+ "\n",
+ "In the next example, we will connect to a Redis instance via SSL, and validate the OCSP status of the certificate. However, the certificate we are using does not have an AIA extension, which means that the OCSP validation cannot be performed."
]
},
{
@@ -229,81 +208,88 @@
"metadata": {},
"outputs": [
{
- "data": {
- "text/plain": [
- "True"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "OCSP validation failed as expected.\n"
+ ]
}
],
"source": [
+ "import os\n",
"import redis\n",
- "import OpenSSL\n",
- "\n",
- "ssl_certfile=\"some-certificate.pem\"\n",
- "ssl_keyfile=\"some-key.pem\"\n",
- "ssl_ca_certs=ssl_certfile\n",
- "ssl_expected_certificate = \"expected-ocsp-certificate.pem\"\n",
"\n",
- "# PyOpenSSL is used only for the purpose of validating the ocsp\n",
- "# stapled response\n",
- "ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n",
- "ctx.use_certificate_file=ssl_certfile\n",
- "ctx.use_privatekey_file=ssl_keyfile\n",
- "expected_certificate = open(ssl_expected_certificate, 'rb').read()\n",
+ "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n",
"\n",
- "ssl_cert_conn = redis.Redis(\n",
+ "r = redis.Redis(\n",
" host=\"localhost\",\n",
" port=6666,\n",
" ssl=True,\n",
- " ssl_certfile=ssl_certfile,\n",
- " ssl_keyfile=ssl_keyfile,\n",
+ " ssl_certfile=os.path.join(pki_dir, \"client-cert.pem\"),\n",
+ " ssl_keyfile=os.path.join(pki_dir, \"client-key.pem\"),\n",
" ssl_cert_reqs=\"required\",\n",
- " ssl_ocsp_context=ctx,\n",
- " ssl_ocsp_expected_cert=expected_certificate,\n",
+ " ssl_ca_certs=os.path.join(pki_dir, \"ca-cert.pem\"),\n",
+ " ssl_validate_ocsp=True,\n",
")\n",
- "ssl_cert_conn.ping()"
+ "\n",
+ "try:\n",
+ " r.ping()\n",
+ "except redis.ConnectionError as e:\n",
+ " assert e.args[0] == \"No AIA information present in ssl certificate\"\n",
+ " print(\"OCSP validation failed as expected.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Naive validation of a stapled OCSP certificate"
+ "## Connect to a Redis instance via SSL, and validate OCSP-stapled certificates\n",
+ "\n",
+ "It is also possible to validate an OCSP stapled response. Again, for this example the server does not send an OCSP stapled response, so the validation will fail."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "OCSP validation failed as expected.\n"
+ ]
+ }
+ ],
"source": [
+ "import os\n",
"import redis\n",
- "import OpenSSL\n",
"\n",
- "ssl_certfile=\"some-certificate.pem\"\n",
- "ssl_keyfile=\"some-key.pem\"\n",
- "ssl_ca_certs=ssl_certfile\n",
- "ssl_expected_certificate = \"expected-ocsp-certificate.pem\"\n",
+ "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n",
+ "ca_cert = os.path.join(pki_dir, \"ca-cert.pem\")\n",
+ "\n",
+ "# It is possible to specify an expected certificate, or leave it out.\n",
+ "expected_certificate = open(ca_cert, 'rb').read()\n",
"\n",
- "# PyOpenSSL is used only for the purpose of validating the ocsp\n",
- "# stapled response\n",
- "ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n",
- "ctx.use_certificate_file=ssl_certfile\n",
- "ctx.use_privatekey_file=ssl_keyfile\n",
+ "# If needed, a custom SSL context for OCSP can be specified via ssl_ocsp_context\n",
"\n",
- "ssl_cert_conn = redis.Redis(\n",
+ "r = redis.Redis(\n",
" host=\"localhost\",\n",
" port=6666,\n",
" ssl=True,\n",
- " ssl_certfile=ssl_certfile,\n",
- " ssl_keyfile=ssl_keyfile,\n",
+ " ssl_certfile=os.path.join(pki_dir, \"client-cert.pem\"),\n",
+ " ssl_keyfile=os.path.join(pki_dir, \"client-key.pem\"),\n",
" ssl_cert_reqs=\"required\",\n",
+ " ssl_ca_certs=ca_cert,\n",
" ssl_validate_ocsp_stapled=True,\n",
+ " ssl_ocsp_expected_cert=expected_certificate,\n",
")\n",
- "ssl_cert_conn.ping()"
+ "\n",
+ "try:\n",
+ " r.ping()\n",
+ "except redis.ConnectionError as e:\n",
+ " assert e.args[0] == \"no ocsp response present\"\n",
+ " print(\"OCSP validation failed as expected.\")"
]
}
],
@@ -325,10 +311,9 @@
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.12"
+ "pygments_lexer": "ipython3"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/docs/logo-redis.png b/docs/logo-redis.png
deleted file mode 100644
index 45b4a3f284..0000000000
Binary files a/docs/logo-redis.png and /dev/null differ
diff --git a/redis/_parsers/base.py b/redis/_parsers/base.py
index 8e59249bef..0137539d66 100644
--- a/redis/_parsers/base.py
+++ b/redis/_parsers/base.py
@@ -182,7 +182,7 @@ async def can_read_destructive(self) -> bool:
return True
try:
async with async_timeout(0):
- return await self._stream.read(1)
+ return self._stream.at_eof()
except TimeoutError:
return False
diff --git a/redis/_parsers/helpers.py b/redis/_parsers/helpers.py
index 9dc8bd6c8b..290a53a272 100644
--- a/redis/_parsers/helpers.py
+++ b/redis/_parsers/helpers.py
@@ -275,17 +275,22 @@ def parse_xinfo_stream(response, **options):
data = {str_if_bytes(k): v for k, v in response.items()}
if not options.get("full", False):
first = data.get("first-entry")
- if first is not None:
+ if first is not None and first[0] is not None:
data["first-entry"] = (first[0], pairs_to_dict(first[1]))
last = data["last-entry"]
- if last is not None:
+ if last is not None and last[0] is not None:
data["last-entry"] = (last[0], pairs_to_dict(last[1]))
else:
data["entries"] = {_id: pairs_to_dict(entry) for _id, entry in data["entries"]}
- if isinstance(data["groups"][0], list):
+ if len(data["groups"]) > 0 and isinstance(data["groups"][0], list):
data["groups"] = [
pairs_to_dict(group, decode_keys=True) for group in data["groups"]
]
+ for g in data["groups"]:
+ if g["consumers"] and g["consumers"][0] is not None:
+ g["consumers"] = [
+ pairs_to_dict(c, decode_keys=True) for c in g["consumers"]
+ ]
else:
data["groups"] = [
{str_if_bytes(k): v for k, v in group.items()}
@@ -361,7 +366,12 @@ def parse_scan(response, **options):
def parse_hscan(response, **options):
cursor, r = response
- return int(cursor), r and pairs_to_dict(r) or {}
+ no_values = options.get("no_values", False)
+ if no_values:
+ payload = r or []
+ else:
+ payload = r and pairs_to_dict(r) or {}
+ return int(cursor), payload
def parse_zscan(response, **options):
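A hedged usage sketch of what the new `no_values` branch enables at the client level, assuming the matching `no_values` keyword on `hscan` (added alongside this parser change) and Redis 7.4+ for `HSCAN ... NOVALUES`:

```python
import redis

r = redis.Redis(decode_responses=True)
r.hset("user:1", mapping={"name": "Ada", "age": "36"})

cursor, pairs = r.hscan("user:1")                   # {'name': 'Ada', 'age': '36'}
cursor, fields = r.hscan("user:1", no_values=True)  # ['name', 'age']
```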
diff --git a/redis/_parsers/hiredis.py b/redis/_parsers/hiredis.py
index 1919d3658e..8c882f38b4 100644
--- a/redis/_parsers/hiredis.py
+++ b/redis/_parsers/hiredis.py
@@ -21,6 +21,11 @@
SERVER_CLOSED_CONNECTION_ERROR,
)
+# Used to signal that hiredis-py does not have enough data to parse.
+# Using `False` or `None` is not reliable, given that the parser can
+# return `False` or `None` for legitimate reasons from RESP payloads.
+NOT_ENOUGH_DATA = object()
+
class _HiredisReaderArgs(TypedDict, total=False):
protocolError: Callable[[str], Exception]
@@ -53,25 +58,26 @@ def on_connect(self, connection, **kwargs):
"protocolError": InvalidResponse,
"replyError": self.parse_error,
"errors": connection.encoder.encoding_errors,
+ "notEnoughData": NOT_ENOUGH_DATA,
}
if connection.encoder.decode_responses:
kwargs["encoding"] = connection.encoder.encoding
self._reader = hiredis.Reader(**kwargs)
- self._next_response = False
+ self._next_response = NOT_ENOUGH_DATA
def on_disconnect(self):
self._sock = None
self._reader = None
- self._next_response = False
+ self._next_response = NOT_ENOUGH_DATA
def can_read(self, timeout):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
- if self._next_response is False:
+ if self._next_response is NOT_ENOUGH_DATA:
self._next_response = self._reader.gets()
- if self._next_response is False:
+ if self._next_response is NOT_ENOUGH_DATA:
return self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
@@ -110,9 +116,9 @@ def read_response(self, disable_decoding=False):
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
- if self._next_response is not False:
+ if self._next_response is not NOT_ENOUGH_DATA:
response = self._next_response
- self._next_response = False
+ self._next_response = NOT_ENOUGH_DATA
return response
if disable_decoding:
@@ -120,7 +126,7 @@ def read_response(self, disable_decoding=False):
else:
response = self._reader.gets()
- while response is False:
+ while response is NOT_ENOUGH_DATA:
self.read_from_socket()
if disable_decoding:
response = self._reader.gets(False)
@@ -158,6 +164,7 @@ def on_connect(self, connection):
kwargs: _HiredisReaderArgs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
+ "notEnoughData": NOT_ENOUGH_DATA,
}
if connection.encoder.decode_responses:
kwargs["encoding"] = connection.encoder.encoding
@@ -172,7 +179,7 @@ def on_disconnect(self):
async def can_read_destructive(self):
if not self._connected:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
- if self._reader.gets():
+ if self._reader.gets() is not NOT_ENOUGH_DATA:
return True
try:
async with async_timeout(0):
@@ -202,7 +209,7 @@ async def read_response(
response = self._reader.gets(False)
else:
response = self._reader.gets()
- while response is False:
+ while response is NOT_ENOUGH_DATA:
await self.read_from_socket()
if disable_decoding:
response = self._reader.gets(False)
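The sentinel-object pattern above deserves a tiny illustration: `False` and `None` are legitimate decoded RESP values, so only an identity check against a private `object()` can unambiguously mean "parser needs more bytes":

```python
# Minimal illustration of the NOT_ENOUGH_DATA sentinel pattern.
NOT_ENOUGH_DATA = object()

def gets(have_full_reply: bool):
    # stand-in for hiredis.Reader.gets(): False/None can be real
    # decoded replies, so they cannot double as "no data" markers
    return False if have_full_reply else NOT_ENOUGH_DATA

assert gets(True) is not NOT_ENOUGH_DATA   # a genuine boolean reply
assert gets(False) is NOT_ENOUGH_DATA      # incomplete buffer, read more
```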
diff --git a/redis/asyncio/client.py b/redis/asyncio/client.py
index 480a877d20..ef1e5afcda 100644
--- a/redis/asyncio/client.py
+++ b/redis/asyncio/client.py
@@ -342,7 +342,10 @@ def __init__(
self._single_conn_lock = asyncio.Lock()
def __repr__(self):
- return f"{self.__class__.__name__}<{self.connection_pool!r}>"
+ return (
+ f"<{self.__class__.__module__}.{self.__class__.__name__}"
+ f"({self.connection_pool!r})>"
+ )
def __await__(self):
return self.initialize().__await__()
diff --git a/redis/asyncio/cluster.py b/redis/asyncio/cluster.py
index 3bf147d7a6..93ee8c7533 100644
--- a/redis/asyncio/cluster.py
+++ b/redis/asyncio/cluster.py
@@ -269,7 +269,7 @@ def __init__(
ssl_min_version: Optional[ssl.TLSVersion] = None,
ssl_ciphers: Optional[str] = None,
protocol: Optional[int] = 2,
- address_remap: Optional[Callable[[str, int], Tuple[str, int]]] = None,
+ address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
) -> None:
if db:
raise RedisClusterException(
@@ -1098,7 +1098,7 @@ def __init__(
startup_nodes: List["ClusterNode"],
require_full_coverage: bool,
connection_kwargs: Dict[str, Any],
- address_remap: Optional[Callable[[str, int], Tuple[str, int]]] = None,
+ address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
) -> None:
self.startup_nodes = {node.name: node for node in startup_nodes}
self.require_full_coverage = require_full_coverage
@@ -1226,13 +1226,12 @@ async def initialize(self) -> None:
for startup_node in self.startup_nodes.values():
try:
# Make sure cluster mode is enabled on this node
- if not (await startup_node.execute_command("INFO")).get(
- "cluster_enabled"
- ):
+ try:
+ cluster_slots = await startup_node.execute_command("CLUSTER SLOTS")
+ except ResponseError:
raise RedisClusterException(
"Cluster mode is not enabled on this node"
)
- cluster_slots = await startup_node.execute_command("CLUSTER SLOTS")
startup_nodes_reachable = True
except Exception as e:
# Try the next startup node.
@@ -1433,7 +1432,8 @@ def __exit__(self, exc_type: None, exc_value: None, traceback: None) -> None:
self._command_stack = []
def __bool__(self) -> bool:
- return bool(self._command_stack)
+ "Pipeline instances should always evaluate to True on Python 3+"
+ return True
def __len__(self) -> int:
return len(self._command_stack)
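A short sketch of the pitfall the `__bool__` change removes: an empty pipeline used to be falsy, so truthiness checks could silently treat a fresh pipeline as absent (the `rc` cluster client here is assumed to be already initialized):

```python
async def demo(rc):
    # rc: an initialized redis.asyncio.RedisCluster (assumed)
    pipe = rc.pipeline()
    assert bool(pipe)       # now True even before queueing commands
    pipe.set("k", "v")
    pipe.get("k")
    assert len(pipe) == 2   # __len__ still reflects the command stack
    await pipe.execute()
```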
diff --git a/redis/asyncio/connection.py b/redis/asyncio/connection.py
index 0074bce5db..dddd9a2fbb 100644
--- a/redis/asyncio/connection.py
+++ b/redis/asyncio/connection.py
@@ -25,6 +25,8 @@
)
from urllib.parse import ParseResult, parse_qs, unquote, urlparse
+from ..utils import format_error_message
+
# the functionality is available in 3.11.x but has a major issue before
# 3.11.3. See https://github.com/redis/redis-py/issues/2633
if sys.version_info >= (3, 11, 3):
@@ -225,7 +227,7 @@ def _close(self):
def __repr__(self):
repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
- return f"{self.__class__.__name__}<{repr_args}>"
+ return f"<{self.__class__.__module__}.{self.__class__.__name__}({repr_args})>"
@abstractmethod
def repr_pieces(self):
@@ -315,9 +317,8 @@ async def _connect(self):
def _host_error(self) -> str:
pass
- @abstractmethod
def _error_message(self, exception: BaseException) -> str:
- pass
+ return format_error_message(self._host_error(), exception)
async def on_connect(self) -> None:
"""Initialize the connection, authenticate and select a database"""
@@ -702,27 +703,6 @@ async def _connect(self):
def _host_error(self) -> str:
return f"{self.host}:{self.port}"
- def _error_message(self, exception: BaseException) -> str:
- # args for socket.error can either be (errno, "message")
- # or just "message"
-
- host_error = self._host_error()
-
- if not exception.args:
- # asyncio has a bug where on Connection reset by peer, the
- # exception is not instanciated, so args is empty. This is the
- # workaround.
- # See: https://github.com/redis/redis-py/issues/2237
- # See: https://github.com/python/cpython/issues/94061
- return f"Error connecting to {host_error}. Connection reset by peer"
- elif len(exception.args) == 1:
- return f"Error connecting to {host_error}. {exception.args[0]}."
- else:
- return (
- f"Error {exception.args[0]} connecting to {host_error}. "
- f"{exception.args[0]}."
- )
-
class SSLConnection(Connection):
"""Manages SSL connections to and from the Redis server(s).
@@ -874,20 +854,6 @@ async def _connect(self):
def _host_error(self) -> str:
return self.path
- def _error_message(self, exception: BaseException) -> str:
- # args for socket.error can either be (errno, "message")
- # or just "message"
- host_error = self._host_error()
- if len(exception.args) == 1:
- return (
- f"Error connecting to unix socket: {host_error}. {exception.args[0]}."
- )
- else:
- return (
- f"Error {exception.args[0]} connecting to unix socket: "
- f"{host_error}. {exception.args[1]}."
- )
-
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
@@ -1061,8 +1027,8 @@ def __init__(
def __repr__(self):
return (
- f"{self.__class__.__name__}"
- f"<{self.connection_class(**self.connection_kwargs)!r}>"
+ f"<{self.__class__.__module__}.{self.__class__.__name__}"
+ f"({self.connection_class(**self.connection_kwargs)!r})>"
)
def reset(self):
diff --git a/redis/asyncio/sentinel.py b/redis/asyncio/sentinel.py
index 6834fb194f..fbd7c4c25d 100644
--- a/redis/asyncio/sentinel.py
+++ b/redis/asyncio/sentinel.py
@@ -30,11 +30,14 @@ def __init__(self, **kwargs):
def __repr__(self):
pool = self.connection_pool
- s = f"{self.__class__.__name__}"
+ return s + ")>"
async def connect_to(self, address):
self.host, self.port = address
@@ -120,8 +123,8 @@ def __init__(self, service_name, sentinel_manager, **kwargs):
def __repr__(self):
return (
- f"{self.__class__.__name__}"
- f""
+ f"<{self.__class__.__module__}.{self.__class__.__name__}"
+ f"(service={self.service_name}({self.is_master and 'master' or 'slave'}))>"
)
def reset(self):
@@ -241,7 +244,10 @@ def __repr__(self):
f"{sentinel.connection_pool.connection_kwargs['host']}:"
f"{sentinel.connection_pool.connection_kwargs['port']}"
)
- return f"{self.__class__.__name__}"
+ return (
+ f"<{self.__class__}.{self.__class__.__name__}"
+ f"(sentinels=[{','.join(sentinel_addresses)}])>"
+ )
def check_master_state(self, state: dict, service_name: str) -> bool:
if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
diff --git a/redis/backoff.py b/redis/backoff.py
index c62e760bdc..f612d60704 100644
--- a/redis/backoff.py
+++ b/redis/backoff.py
@@ -19,7 +19,7 @@ def reset(self):
pass
@abstractmethod
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
"""Compute backoff in seconds upon failure"""
pass
@@ -27,25 +27,25 @@ def compute(self, failures):
class ConstantBackoff(AbstractBackoff):
"""Constant backoff upon failure"""
- def __init__(self, backoff):
+ def __init__(self, backoff: float) -> None:
"""`backoff`: backoff time in seconds"""
self._backoff = backoff
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
return self._backoff
class NoBackoff(ConstantBackoff):
"""No backoff upon failure"""
- def __init__(self):
+ def __init__(self) -> None:
super().__init__(0)
class ExponentialBackoff(AbstractBackoff):
"""Exponential backoff upon failure"""
- def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
+ def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE):
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
@@ -53,14 +53,14 @@ def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
self._cap = cap
self._base = base
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
return min(self._cap, self._base * 2**failures)
class FullJitterBackoff(AbstractBackoff):
"""Full jitter backoff upon failure"""
- def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
+ def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
@@ -68,14 +68,14 @@ def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
self._cap = cap
self._base = base
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
return random.uniform(0, min(self._cap, self._base * 2**failures))
class EqualJitterBackoff(AbstractBackoff):
"""Equal jitter backoff upon failure"""
- def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
+ def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
@@ -83,7 +83,7 @@ def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
self._cap = cap
self._base = base
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
temp = min(self._cap, self._base * 2**failures) / 2
return temp + random.uniform(0, temp)
@@ -91,7 +91,7 @@ def compute(self, failures):
class DecorrelatedJitterBackoff(AbstractBackoff):
"""Decorrelated jitter backoff upon failure"""
- def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
+ def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
@@ -100,10 +100,10 @@ def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
self._base = base
self._previous_backoff = 0
- def reset(self):
+ def reset(self) -> None:
self._previous_backoff = 0
- def compute(self, failures):
+ def compute(self, failures: int) -> float:
max_backoff = max(self._base, self._previous_backoff * 3)
temp = random.uniform(self._base, max_backoff)
self._previous_backoff = min(self._cap, temp)
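A worked example of the now-typed backoff formulas: `ExponentialBackoff.compute` grows as `min(cap, base * 2**failures)`, and `EqualJitterBackoff` keeps half of that and randomizes the rest (default `cap=0.512`, `base=0.008` assumed unchanged):

```python
from redis.backoff import EqualJitterBackoff, ExponentialBackoff

eb = ExponentialBackoff(cap=0.512, base=0.008)
print([eb.compute(f) for f in range(4)])  # [0.008, 0.016, 0.032, 0.064]

ej = EqualJitterBackoff(cap=0.512, base=0.008)
# compute(3): temp = min(0.512, 0.064) / 2 = 0.032, plus uniform(0, 0.032)
assert 0.032 <= ej.compute(3) <= 0.064
```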
diff --git a/redis/client.py b/redis/client.py
index 5a12f40e7b..33253cfda4 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -320,7 +320,10 @@ def __init__(
self.response_callbacks.update(_RedisCallbacksRESP2)
def __repr__(self) -> str:
- return f"{type(self).__name__}<{repr(self.connection_pool)}>"
+ return (
+ f"<{type(self).__module__}.{type(self).__name__}"
+ f"({repr(self.connection_pool)})>"
+ )
def get_encoder(self) -> "Encoder":
"""Get the connection pool's encoder"""
diff --git a/redis/cluster.py b/redis/cluster.py
index 4dc0e389bb..6479010958 100644
--- a/redis/cluster.py
+++ b/redis/cluster.py
@@ -499,7 +499,7 @@ def __init__(
read_from_replicas: bool = False,
dynamic_startup_nodes: bool = True,
url: Optional[str] = None,
- address_remap: Optional[Callable[[str, int], Tuple[str, int]]] = None,
+ address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
**kwargs,
):
"""
@@ -1310,7 +1310,7 @@ def __init__(
lock=None,
dynamic_startup_nodes=True,
connection_pool_class=ConnectionPool,
- address_remap: Optional[Callable[[str, int], Tuple[str, int]]] = None,
+ address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
**kwargs,
):
self.nodes_cache = {}
@@ -1513,11 +1513,12 @@ def initialize(self):
)
self.startup_nodes[startup_node.name].redis_connection = r
# Make sure cluster mode is enabled on this node
- if bool(r.info().get("cluster_enabled")) is False:
+ try:
+ cluster_slots = str_if_bytes(r.execute_command("CLUSTER SLOTS"))
+ except ResponseError:
raise RedisClusterException(
"Cluster mode is not enabled on this node"
)
- cluster_slots = str_if_bytes(r.execute_command("CLUSTER SLOTS"))
startup_nodes_reachable = True
except Exception as e:
# Try the next startup node.
diff --git a/redis/commands/core.py b/redis/commands/core.py
index 5cf6b6d89a..a3fecdcead 100644
--- a/redis/commands/core.py
+++ b/redis/commands/core.py
@@ -5,7 +5,6 @@
import warnings
from typing import (
TYPE_CHECKING,
- Any,
AsyncIterator,
Awaitable,
Callable,
@@ -37,6 +36,7 @@
KeysT,
KeyT,
PatternT,
+ ResponseT,
ScriptTextT,
StreamIdT,
TimeoutSecT,
@@ -49,8 +49,6 @@
from redis.asyncio.client import Redis as AsyncRedis
from redis.client import Redis
-ResponseT = Union[Awaitable, Any]
-
class ACLCommands(CommandsProtocol):
"""
@@ -201,69 +199,58 @@ def acl_setuser(
"""
Create or update an ACL user.
- Create or update the ACL for ``username``. If the user already exists,
+ Create or update the ACL for `username`. If the user already exists,
the existing ACL is completely overwritten and replaced with the
specified values.
- ``enabled`` is a boolean indicating whether the user should be allowed
- to authenticate or not. Defaults to ``False``.
-
- ``nopass`` is a boolean indicating whether the can authenticate without
- a password. This cannot be True if ``passwords`` are also specified.
-
- ``passwords`` if specified is a list of plain text passwords
- to add to or remove from the user. Each password must be prefixed with
- a '+' to add or a '-' to remove. For convenience, the value of
- ``passwords`` can be a simple prefixed string when adding or
- removing a single password.
-
- ``hashed_passwords`` if specified is a list of SHA-256 hashed passwords
- to add to or remove from the user. Each hashed password must be
- prefixed with a '+' to add or a '-' to remove. For convenience,
- the value of ``hashed_passwords`` can be a simple prefixed string when
- adding or removing a single password.
-
- ``categories`` if specified is a list of strings representing category
- permissions. Each string must be prefixed with either a '+' to add the
- category permission or a '-' to remove the category permission.
-
- ``commands`` if specified is a list of strings representing command
- permissions. Each string must be prefixed with either a '+' to add the
- command permission or a '-' to remove the command permission.
-
- ``keys`` if specified is a list of key patterns to grant the user
- access to. Keys patterns allow '*' to support wildcard matching. For
- example, '*' grants access to all keys while 'cache:*' grants access
- to all keys that are prefixed with 'cache:'. ``keys`` should not be
- prefixed with a '~'.
-
- ``reset`` is a boolean indicating whether the user should be fully
- reset prior to applying the new ACL. Setting this to True will
- remove all existing passwords, flags and privileges from the user and
- then apply the specified rules. If this is False, the user's existing
- passwords, flags and privileges will be kept and any new specified
- rules will be applied on top.
-
- ``reset_keys`` is a boolean indicating whether the user's key
- permissions should be reset prior to applying any new key permissions
- specified in ``keys``. If this is False, the user's existing
- key permissions will be kept and any new specified key permissions
- will be applied on top.
-
- ``reset_channels`` is a boolean indicating whether the user's channel
- permissions should be reset prior to applying any new channel permissions
- specified in ``channels``.If this is False, the user's existing
- channel permissions will be kept and any new specified channel permissions
- will be applied on top.
-
- ``reset_passwords`` is a boolean indicating whether to remove all
- existing passwords and the 'nopass' flag from the user prior to
- applying any new passwords specified in 'passwords' or
- 'hashed_passwords'. If this is False, the user's existing passwords
- and 'nopass' status will be kept and any new specified passwords
- or hashed_passwords will be applied on top.
-
- For more information see https://redis.io/commands/acl-setuser
+ For more information, see https://redis.io/commands/acl-setuser
+
+ Args:
+ username: The name of the user whose ACL is to be created or updated.
+ enabled: Indicates whether the user should be allowed to authenticate.
+ Defaults to `False`.
+ nopass: Indicates whether the user can authenticate without a password.
+ This cannot be `True` if `passwords` are also specified.
+ passwords: A list of plain text passwords to add to or remove from the user.
+ Each password must be prefixed with a '+' to add or a '-' to
+ remove. For convenience, a single prefixed string can be used
+ when adding or removing a single password.
+ hashed_passwords: A list of SHA-256 hashed passwords to add to or remove
+ from the user. Each hashed password must be prefixed with
+ a '+' to add or a '-' to remove. For convenience, a single
+ prefixed string can be used when adding or removing a
+ single password.
+ categories: A list of strings representing category permissions. Each string
+ must be prefixed with either a '+' to add the category
+ permission or a '-' to remove the category permission.
+ commands: A list of strings representing command permissions. Each string
+ must be prefixed with either a '+' to add the command permission
+ or a '-' to remove the command permission.
+ keys: A list of key patterns to grant the user access to. Key patterns allow
+ '*' to support wildcard matching. For example, '*' grants access to
+ all keys while 'cache:*' grants access to all keys that are prefixed
+ with 'cache:'. `keys` should not be prefixed with a '~'.
+ reset: Indicates whether the user should be fully reset prior to applying
+ the new ACL. Setting this to `True` will remove all existing
+ passwords, flags, and privileges from the user and then apply the
+ specified rules. If `False`, the user's existing passwords, flags,
+ and privileges will be kept and any new specified rules will be
+ applied on top.
+ reset_keys: Indicates whether the user's key permissions should be reset
+ prior to applying any new key permissions specified in `keys`.
+ If `False`, the user's existing key permissions will be kept and
+ any new specified key permissions will be applied on top.
+ reset_channels: Indicates whether the user's channel permissions should be
+ reset prior to applying any new channel permissions
+ specified in `channels`. If `False`, the user's existing
+ channel permissions will be kept and any new specified
+ channel permissions will be applied on top.
+ reset_passwords: Indicates whether to remove all existing passwords and the
+ `nopass` flag from the user prior to applying any new
+ passwords specified in `passwords` or `hashed_passwords`.
+ If `False`, the user's existing passwords and `nopass`
+ status will be kept and any new specified passwords or
+ hashed passwords will be applied on top.
"""
encoder = self.get_encoder()
pieces: List[EncodableT] = [username]
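
For illustration, a minimal usage sketch of the documented parameters (the user name, password, and key pattern below are hypothetical, and `r` is assumed to be a standard `redis.Redis()` client):

    import redis

    r = redis.Redis()

    # Grant a hypothetical user one password and limited access to
    # cache keys only; reset=True starts from a clean ACL.
    r.acl_setuser(
        "alice",
        enabled=True,
        passwords=["+s3cret"],    # '+' adds the password
        categories=["+@read"],
        commands=["+set"],
        keys=["cache:*"],         # no '~' prefix, per the docstring
        reset=True,
    )
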
@@ -461,6 +448,7 @@ def client_kill_filter(
skipme: Union[bool, None] = None,
laddr: Union[bool, None] = None,
user: str = None,
+ maxage: Union[int, None] = None,
**kwargs,
) -> ResponseT:
"""
@@ -474,6 +462,7 @@ def client_kill_filter(
options. If skipme is not provided, the server defaults to skipme=True
:param laddr: Kills a client by its 'local (bind) address:port'
:param user: Kills a client for a specific user name
+ :param maxage: Kills clients that are older than the specified age in seconds
"""
args = []
if _type is not None:
@@ -496,6 +485,8 @@ def client_kill_filter(
args.extend((b"LADDR", laddr))
if user is not None:
args.extend((b"USER", user))
+ if maxage is not None:
+ args.extend((b"MAXAGE", maxage))
if not args:
raise DataError(
"CLIENT KILL ... ... "
@@ -3102,6 +3093,7 @@ def hscan(
cursor: int = 0,
match: Union[PatternT, None] = None,
count: Union[int, None] = None,
+ no_values: Union[bool, None] = None,
) -> ResponseT:
"""
Incrementally return key/value slices in a hash. Also return a cursor
@@ -3111,6 +3103,8 @@ def hscan(
``count`` allows for hint the minimum number of returns
+ ``no_values`` indicates that only keys should be returned, without values.
+
For more information see https://redis.io/commands/hscan
"""
pieces: list[EncodableT] = [name, cursor]
@@ -3118,13 +3112,16 @@ def hscan(
pieces.extend([b"MATCH", match])
if count is not None:
pieces.extend([b"COUNT", count])
- return self.execute_command("HSCAN", *pieces)
+ if no_values:
+ pieces.extend([b"NOVALUES"])
+ return self.execute_command("HSCAN", *pieces, no_values=no_values)
def hscan_iter(
self,
name: str,
match: Union[PatternT, None] = None,
count: Union[int, None] = None,
+ no_values: Union[bool, None] = None,
) -> Iterator:
"""
Make an iterator using the HSCAN command so that the client doesn't
@@ -3133,11 +3130,18 @@ def hscan_iter(
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
+
+ ``no_values`` indicates that only keys should be returned, without values
"""
cursor = "0"
while cursor != 0:
- cursor, data = self.hscan(name, cursor=cursor, match=match, count=count)
- yield from data.items()
+ cursor, data = self.hscan(
+ name, cursor=cursor, match=match, count=count, no_values=no_values
+ )
+ if no_values:
+ yield from data
+ else:
+ yield from data.items()
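
A sketch of how the two modes differ for callers, reusing the `r` client from the earlier sketches and a hypothetical hash "h":

    # no_values=True yields bare field names ...
    for field in r.hscan_iter("h", no_values=True):
        print(field)

    # ... while the default yields (field, value) pairs.
    for field, value in r.hscan_iter("h"):
        print(field, value)
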
def zscan(
self,
@@ -3253,6 +3257,7 @@ async def hscan_iter(
name: str,
match: Union[PatternT, None] = None,
count: Union[int, None] = None,
+ no_values: Union[bool, None] = None,
) -> AsyncIterator:
"""
Make an iterator using the HSCAN command so that the client doesn't
@@ -3261,14 +3266,20 @@ async def hscan_iter(
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
+
+ ``no_values`` indicates that only keys should be returned, without values
"""
cursor = "0"
while cursor != 0:
cursor, data = await self.hscan(
- name, cursor=cursor, match=match, count=count
+ name, cursor=cursor, match=match, count=count, no_values=no_values
)
- for it in data.items():
- yield it
+ if no_values:
+ for it in data:
+ yield it
+ else:
+ for it in data.items():
+ yield it
async def zscan_iter(
self,
@@ -5076,6 +5087,357 @@ def hstrlen(self, name: str, key: str) -> Union[Awaitable[int], int]:
"""
return self.execute_command("HSTRLEN", name, key)
+ def hexpire(
+ self,
+ name: KeyT,
+ seconds: ExpiryT,
+ *fields: str,
+ nx: bool = False,
+ xx: bool = False,
+ gt: bool = False,
+ lt: bool = False,
+ ) -> ResponseT:
+ """
+ Sets or updates the expiration time for fields within a hash key, using relative
+ time in seconds.
+
+ If a field already has an expiration time, the behavior of the update can be
+ controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
+
+ The return value provides detailed information about the outcome for each field.
+
+ For more information, see https://redis.io/commands/hexpire
+
+ Args:
+ name: The name of the hash key.
+ seconds: Expiration time in seconds, relative. Can be an integer, or a
+ Python `timedelta` object.
+ fields: List of fields within the hash to apply the expiration time to.
+ nx: Set expiry only when the field has no expiry.
+ xx: Set expiry only when the field has an existing expiry.
+ gt: Set expiry only when the new expiry is greater than the current one.
+ lt: Set expiry only when the new expiry is less than the current one.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `0` if the specified NX | XX | GT | LT condition was not met.
+ - `1` if the expiration time was set or updated.
+ - `2` if the field was deleted because the specified expiration time is
+ in the past.
+ """
+ conditions = [nx, xx, gt, lt]
+ if sum(conditions) > 1:
+ raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
+
+ if isinstance(seconds, datetime.timedelta):
+ seconds = int(seconds.total_seconds())
+
+ options = []
+ if nx:
+ options.append("NX")
+ if xx:
+ options.append("XX")
+ if gt:
+ options.append("GT")
+ if lt:
+ options.append("LT")
+
+ return self.execute_command(
+ "HEXPIRE", name, seconds, *options, "FIELDS", len(fields), *fields
+ )
+
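
A usage sketch of the new field-level expiry (key and field names are illustrative; assumes a server with hash-field TTL support, Redis 7.4 or later):

    import datetime

    r.hset("session:42", mapping={"token": "abc", "theme": "dark"})

    # Expire only the "token" field in 30 minutes; "theme" keeps no TTL.
    # Returns one status code per requested field, e.g. [1].
    r.hexpire("session:42", datetime.timedelta(minutes=30), "token")

    # Only tighten the TTL if the new expiry is sooner than the current one.
    r.hexpire("session:42", 60, "token", lt=True)
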
+ def hpexpire(
+ self,
+ name: KeyT,
+ milliseconds: ExpiryT,
+ *fields: str,
+ nx: bool = False,
+ xx: bool = False,
+ gt: bool = False,
+ lt: bool = False,
+ ) -> ResponseT:
+ """
+ Sets or updates the expiration time for fields within a hash key, using relative
+ time in milliseconds.
+
+ If a field already has an expiration time, the behavior of the update can be
+ controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
+
+ The return value provides detailed information about the outcome for each field.
+
+ For more information, see https://redis.io/commands/hpexpire
+
+ Args:
+ name: The name of the hash key.
+ milliseconds: Expiration time in milliseconds, relative. Can be an integer,
+ or a Python `timedelta` object.
+ fields: List of fields within the hash to apply the expiration time to.
+ nx: Set expiry only when the field has no expiry.
+ xx: Set expiry only when the field has an existing expiry.
+ gt: Set expiry only when the new expiry is greater than the current one.
+ lt: Set expiry only when the new expiry is less than the current one.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `0` if the specified NX | XX | GT | LT condition was not met.
+ - `1` if the expiration time was set or updated.
+ - `2` if the field was deleted because the specified expiration time is
+ in the past.
+ """
+ conditions = [nx, xx, gt, lt]
+ if sum(conditions) > 1:
+ raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
+
+ if isinstance(milliseconds, datetime.timedelta):
+ milliseconds = int(milliseconds.total_seconds() * 1000)
+
+ options = []
+ if nx:
+ options.append("NX")
+ if xx:
+ options.append("XX")
+ if gt:
+ options.append("GT")
+ if lt:
+ options.append("LT")
+
+ return self.execute_command(
+ "HPEXPIRE", name, milliseconds, *options, "FIELDS", len(fields), *fields
+ )
+
+ def hexpireat(
+ self,
+ name: KeyT,
+ unix_time_seconds: AbsExpiryT,
+ *fields: str,
+ nx: bool = False,
+ xx: bool = False,
+ gt: bool = False,
+ lt: bool = False,
+ ) -> ResponseT:
+ """
+ Sets or updates the expiration time for fields within a hash key, using an
+ absolute Unix timestamp in seconds.
+
+ If a field already has an expiration time, the behavior of the update can be
+ controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
+
+ The return value provides detailed information about the outcome for each field.
+
+ For more information, see https://redis.io/commands/hexpireat
+
+ Args:
+ name: The name of the hash key.
+ unix_time_seconds: Expiration time as Unix timestamp in seconds. Can be an
+ integer or a Python `datetime` object.
+ fields: List of fields within the hash to apply the expiration time to.
+ nx: Set expiry only when the field has no expiry.
+ xx: Set expiry only when the field has an existing expiry.
+ gt: Set expiry only when the new expiry is greater than the current one.
+ lt: Set expiry only when the new expiry is less than the current one.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `0` if the specified NX | XX | GT | LT condition was not met.
+ - `1` if the expiration time was set or updated.
+ - `2` if the field was deleted because the specified expiration time is
+ in the past.
+ """
+ conditions = [nx, xx, gt, lt]
+ if sum(conditions) > 1:
+ raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
+
+ if isinstance(unix_time_seconds, datetime.datetime):
+ unix_time_seconds = int(unix_time_seconds.timestamp())
+
+ options = []
+ if nx:
+ options.append("NX")
+ if xx:
+ options.append("XX")
+ if gt:
+ options.append("GT")
+ if lt:
+ options.append("LT")
+
+ return self.execute_command(
+ "HEXPIREAT",
+ name,
+ unix_time_seconds,
+ *options,
+ "FIELDS",
+ len(fields),
+ *fields,
+ )
+
+ def hpexpireat(
+ self,
+ name: KeyT,
+ unix_time_milliseconds: AbsExpiryT,
+ *fields: str,
+ nx: bool = False,
+ xx: bool = False,
+ gt: bool = False,
+ lt: bool = False,
+ ) -> ResponseT:
+ """
+ Sets or updates the expiration time for fields within a hash key, using an
+ absolute Unix timestamp in milliseconds.
+
+ If a field already has an expiration time, the behavior of the update can be
+ controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
+
+ The return value provides detailed information about the outcome for each field.
+
+ For more information, see https://redis.io/commands/hpexpireat
+
+ Args:
+ name: The name of the hash key.
+ unix_time_milliseconds: Expiration time as Unix timestamp in milliseconds.
+ Can be an integer or a Python `datetime` object.
+ fields: List of fields within the hash to apply the expiration time to.
+ nx: Set expiry only when the field has no expiry.
+ xx: Set expiry only when the field has an existing expiry.
+ gt: Set expiry only when the new expiry is greater than the current one.
+ lt: Set expiry only when the new expiry is less than the current one.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `0` if the specified NX | XX | GT | LT condition was not met.
+ - `1` if the expiration time was set or updated.
+ - `2` if the field was deleted because the specified expiration time is
+ in the past.
+ """
+ conditions = [nx, xx, gt, lt]
+ if sum(conditions) > 1:
+ raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
+
+ if isinstance(unix_time_milliseconds, datetime.datetime):
+ unix_time_milliseconds = int(unix_time_milliseconds.timestamp() * 1000)
+
+ options = []
+ if nx:
+ options.append("NX")
+ if xx:
+ options.append("XX")
+ if gt:
+ options.append("GT")
+ if lt:
+ options.append("LT")
+
+ return self.execute_command(
+ "HPEXPIREAT",
+ name,
+ unix_time_milliseconds,
+ *options,
+ "FIELDS",
+ len(fields),
+ *fields,
+ )
+
+ def hpersist(self, name: KeyT, *fields: str) -> ResponseT:
+ """
+ Removes the expiration time for each specified field in a hash.
+
+ For more information, see https://redis.io/commands/hpersist
+
+ Args:
+ name: The name of the hash key.
+ fields: A list of fields within the hash from which to remove the
+ expiration time.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `-1` if the field exists but has no associated expiration time.
+ - `1` if the expiration time was successfully removed from the field.
+ """
+ return self.execute_command("HPERSIST", name, "FIELDS", len(fields), *fields)
+
+ def hexpiretime(self, key: KeyT, *fields: str) -> ResponseT:
+ """
+ Returns the expiration times of hash fields as Unix timestamps in seconds.
+
+ For more information, see https://redis.io/commands/hexpiretime
+
+ Args:
+ key: The hash key.
+ fields: A list of fields within the hash for which to get the expiration
+ time.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `-1` if the field exists but has no associated expiration time.
+ - A positive integer representing the expiration Unix timestamp in
+ seconds, if the field has an associated expiration time.
+ """
+ return self.execute_command("HEXPIRETIME", key, "FIELDS", len(fields), *fields)
+
+ def hpexpiretime(self, key: KeyT, *fields: str) -> ResponseT:
+ """
+ Returns the expiration times of hash fields as Unix timestamps in milliseconds.
+
+ For more information, see https://redis.io/commands/hpexpiretime
+
+ Args:
+ key: The hash key.
+ fields: A list of fields within the hash for which to get the expiration
+ time.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `-1` if the field exists but has no associated expiration time.
+ - A positive integer representing the expiration Unix timestamp in
+ milliseconds, if the field has an associated expiration time.
+ """
+ return self.execute_command("HPEXPIRETIME", key, "FIELDS", len(fields), *fields)
+
+ def httl(self, key: KeyT, *fields: str) -> ResponseT:
+ """
+ Returns the TTL (Time To Live) in seconds for each specified field within a hash
+ key.
+
+ For more information, see https://redis.io/commands/httl
+
+ Args:
+ key: The hash key.
+ fields: A list of fields within the hash for which to get the TTL.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `-1` if the field exists but has no associated expiration time.
+ - A positive integer representing the TTL in seconds if the field has
+ an associated expiration time.
+ """
+ return self.execute_command("HTTL", key, "FIELDS", len(fields), *fields)
+
+ def hpttl(self, key: KeyT, *fields: str) -> ResponseT:
+ """
+ Returns the TTL (Time To Live) in milliseconds for each specified field within a
+ hash key.
+
+ For more information, see https://redis.io/commands/hpttl
+
+ Args:
+ key: The hash key.
+ fields: A list of fields within the hash for which to get the TTL.
+
+ Returns:
+ Returns a list which contains for each field in the request:
+ - `-2` if the field does not exist, or if the key does not exist.
+ - `-1` if the field exists but has no associated expiration time.
+ - A positive integer representing the TTL in milliseconds if the field
+ has an associated expiration time.
+ """
+ return self.execute_command("HPTTL", key, "FIELDS", len(fields), *fields)
+
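
The read-side commands pair with the setters above; continuing the previous sketch:

    # Remaining TTLs, one entry per field: seconds, -1 (no TTL) or -2.
    print(r.httl("session:42", "token", "theme"))      # e.g. [1800, -1]

    # Absolute expiration timestamps, in milliseconds.
    print(r.hpexpiretime("session:42", "token"))

    # Remove the TTL again, making the field persistent.
    r.hpersist("session:42", "token")
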
AsyncHashCommands = HashCommands
@@ -5251,7 +5613,7 @@ def pubsub_shardnumsub(self, *args: ChannelT, **kwargs) -> ResponseT:
class ScriptCommands(CommandsProtocol):
"""
Redis Lua script commands. see:
- https://redis.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/
+ https://redis.io/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/
"""
def _eval(
diff --git a/redis/commands/search/_util.py b/redis/commands/search/_util.py
index dd1dff33dd..191600d7c6 100644
--- a/redis/commands/search/_util.py
+++ b/redis/commands/search/_util.py
@@ -1,7 +1,7 @@
-def to_string(s):
+def to_string(s, encoding: str = "utf-8"):
if isinstance(s, str):
return s
elif isinstance(s, bytes):
- return s.decode("utf-8", "ignore")
+ return s.decode(encoding, "ignore")
else:
return s # Not a string we care about
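
A tiny sketch of the new parameter:

    from redis.commands.search._util import to_string

    to_string(b"caf\xe9", encoding="latin-1")   # -> "café"
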
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
index 2df2b5a754..6a828c7e62 100644
--- a/redis/commands/search/commands.py
+++ b/redis/commands/search/commands.py
@@ -2,13 +2,15 @@
import time
from typing import Dict, List, Optional, Union
-from redis.client import Pipeline
+from redis.client import NEVER_DECODE, Pipeline
from redis.utils import deprecated_function
from ..helpers import get_protocol_version, parse_to_dict
from ._util import to_string
from .aggregation import AggregateRequest, AggregateResult, Cursor
from .document import Document
+from .field import Field
+from .indexDefinition import IndexDefinition
from .query import Query
from .result import Result
from .suggestion import SuggestionParser
@@ -80,6 +82,7 @@ def _parse_search(self, res, **kwargs):
duration=kwargs["duration"],
has_payload=kwargs["query"]._with_payloads,
with_scores=kwargs["query"]._with_scores,
+ field_encodings=kwargs["query"]._return_fields_decode_as,
)
def _parse_aggregate(self, res, **kwargs):
@@ -151,44 +154,43 @@ def batch_indexer(self, chunk_size=100):
def create_index(
self,
- fields,
- no_term_offsets=False,
- no_field_flags=False,
- stopwords=None,
- definition=None,
+ fields: List[Field],
+ no_term_offsets: bool = False,
+ no_field_flags: bool = False,
+ stopwords: Optional[List[str]] = None,
+ definition: Optional[IndexDefinition] = None,
max_text_fields=False,
temporary=None,
- no_highlight=False,
- no_term_frequencies=False,
- skip_initial_scan=False,
+ no_highlight: bool = False,
+ no_term_frequencies: bool = False,
+ skip_initial_scan: bool = False,
):
"""
- Create the search index. The index must not already exist.
-
- ### Parameters:
-
- - **fields**: a list of TextField or NumericField objects
- - **no_term_offsets**: If true, we will not save term offsets in
- the index
- - **no_field_flags**: If true, we will not save field flags that
- allow searching in specific fields
- - **stopwords**: If not None, we create the index with this custom
- stopword list. The list can be empty
- - **max_text_fields**: If true, we will encode indexes as if there
- were more than 32 text fields which allows you to add additional
- fields (beyond 32).
- - **temporary**: Create a lightweight temporary index which will
- expire after the specified period of inactivity (in seconds). The
- internal idle timer is reset whenever the index is searched or added to.
- - **no_highlight**: If true, disabling highlighting support.
- Also implied by no_term_offsets.
- - **no_term_frequencies**: If true, we avoid saving the term frequencies
- in the index.
- - **skip_initial_scan**: If true, we do not scan and index.
-
- For more information see `FT.CREATE `_.
- """ # noqa
+ Creates the search index. The index must not already exist.
+
+ For more information, see https://redis.io/commands/ft.create/
+
+ Args:
+ fields: A list of Field objects.
+ no_term_offsets: If `True`, term offsets will not be saved in the index.
+ no_field_flags: If `True`, field flags that allow searching in specific
+ fields will not be saved.
+ stopwords: If provided, the index will be created with this custom stopword
+ list. The list can be empty.
+ definition: If provided, the index will be created with this custom index
+ definition.
+ max_text_fields: If `True`, indexes will be encoded as if there were more
+ than 32 text fields, allowing for additional fields beyond 32.
+ temporary: Creates a lightweight temporary index which will expire after the
+ specified period of inactivity (in seconds). The internal idle
+ timer is reset whenever the index is searched or added to.
+ no_highlight: If `True`, disables highlighting support. Also implied by
+ `no_term_offsets`.
+ no_term_frequencies: If `True`, term frequencies will not be saved in the
+ index.
+ skip_initial_scan: If `True`, the initial scan and indexing will be skipped.
+ """
args = [CREATE_CMD, self.index_name]
if definition is not None:
args += definition.args
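
A sketch of the now-typed signature in use (index name, schema, and prefix are illustrative; `r` is the client from the earlier sketches):

    from redis.commands.search.field import NumericField, TextField
    from redis.commands.search.indexDefinition import IndexDefinition

    r.ft("idx:products").create_index(
        fields=[TextField("title"), NumericField("price")],
        definition=IndexDefinition(prefix=["product:"]),
        stopwords=[],          # index with an explicitly empty stopword list
    )
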
@@ -498,7 +500,12 @@ def search(
""" # noqa
args, query = self._mk_query_args(query, query_params=query_params)
st = time.time()
- res = self.execute_command(SEARCH_CMD, *args)
+
+ options = {}
+ if get_protocol_version(self.client) not in ["3", 3]:
+ options[NEVER_DECODE] = True
+
+ res = self.execute_command(SEARCH_CMD, *args, **options)
if isinstance(res, Pipeline):
return res
@@ -925,7 +932,12 @@ async def search(
""" # noqa
args, query = self._mk_query_args(query, query_params=query_params)
st = time.time()
- res = await self.execute_command(SEARCH_CMD, *args)
+
+ options = {}
+ if get_protocol_version(self.client) not in ["3", 3]:
+ options[NEVER_DECODE] = True
+
+ res = await self.execute_command(SEARCH_CMD, *args, **options)
if isinstance(res, Pipeline):
return res
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
index f316ed9f14..8af7777f19 100644
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -4,6 +4,10 @@
class Field:
+ """
+ A class representing a field in a document.
+ """
+
NUMERIC = "NUMERIC"
TEXT = "TEXT"
WEIGHT = "WEIGHT"
@@ -14,6 +18,8 @@ class Field:
NOINDEX = "NOINDEX"
AS = "AS"
GEOSHAPE = "GEOSHAPE"
+ INDEX_MISSING = "INDEXMISSING"
+ INDEX_EMPTY = "INDEXEMPTY"
def __init__(
self,
@@ -21,8 +27,24 @@ def __init__(
args: List[str] = None,
sortable: bool = False,
no_index: bool = False,
+ index_missing: bool = False,
+ index_empty: bool = False,
as_name: str = None,
):
+ """
+ Create a new field object.
+
+ Args:
+ name: The name of the field.
+ args: Arguments for the field definition, normally supplied by the
+ Field subclasses (for example, the field type).
+ sortable: If `True`, the field will be sortable.
+ no_index: If `True`, the field will not be indexed.
+ index_missing: If `True`, it will be possible to search for documents that
+ have this field missing.
+ index_empty: If `True`, it will be possible to search for documents that
+ have this field empty.
+ as_name: If provided, this alias will be used for the field.
+ """
if args is None:
args = []
self.name = name
@@ -34,6 +56,10 @@ def __init__(
self.args_suffix.append(Field.SORTABLE)
if no_index:
self.args_suffix.append(Field.NOINDEX)
+ if index_missing:
+ self.args_suffix.append(Field.INDEX_MISSING)
+ if index_empty:
+ self.args_suffix.append(Field.INDEX_EMPTY)
if no_index and not sortable:
raise ValueError("Non-Sortable non-Indexable fields are ignored")
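
With the two new flags, a schema can opt fields into missing/empty indexing; a sketch (assumes a RediSearch version that supports INDEXMISSING/INDEXEMPTY, 2.10 or later):

    from redis.commands.search.field import TagField, TextField

    schema = [
        # Documents lacking "brand" become findable via ismissing(@brand).
        TagField("brand", index_missing=True),
        # Documents whose "description" is empty stay searchable.
        TextField("description", index_empty=True),
    ]
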
diff --git a/redis/commands/search/query.py b/redis/commands/search/query.py
index 113ddf9da8..84d60a7cec 100644
--- a/redis/commands/search/query.py
+++ b/redis/commands/search/query.py
@@ -35,6 +35,7 @@ def __init__(self, query_string: str) -> None:
self._in_order: bool = False
self._sortby: Optional[SortbyField] = None
self._return_fields: List = []
+ self._return_fields_decode_as: dict = {}
self._summarize_fields: List = []
self._highlight_fields: List = []
self._language: Optional[str] = None
@@ -53,13 +54,27 @@ def limit_ids(self, *ids) -> "Query":
def return_fields(self, *fields) -> "Query":
"""Add fields to return fields."""
- self._return_fields += fields
+ for field in fields:
+ self.return_field(field)
return self
- def return_field(self, field: str, as_field: Optional[str] = None) -> "Query":
- """Add field to return fields (Optional: add 'AS' name
- to the field)."""
+ def return_field(
+ self,
+ field: str,
+ as_field: Optional[str] = None,
+ decode_field: Optional[bool] = True,
+ encoding: Optional[str] = "utf8",
+ ) -> "Query":
+ """
+ Add a field to the list of fields to return.
+
+ - **field**: The field to include in query results
+ - **as_field**: The alias for the field
+ - **decode_field**: Whether to decode the field from bytes to string
+ - **encoding**: The encoding to use when decoding the field
+ """
self._return_fields.append(field)
+ self._return_fields_decode_as[field] = encoding if decode_field else None
if as_field is not None:
self._return_fields += ("AS", as_field)
return self
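
A sketch of the per-field decoding control, e.g. for binary payloads that must not be UTF-8 decoded (index and field names are illustrative):

    from redis.commands.search.query import Query

    q = (
        Query("*")
        # Keep the raw bytes of a binary (e.g. vector) field.
        .return_field("embedding", decode_field=False)
        # Decode another field with a non-default encoding.
        .return_field("title", encoding="latin-1")
    )
    res = r.ft("idx:products").search(q)
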
diff --git a/redis/commands/search/result.py b/redis/commands/search/result.py
index 5b19e6faa4..e2c7efb71f 100644
--- a/redis/commands/search/result.py
+++ b/redis/commands/search/result.py
@@ -1,3 +1,5 @@
+from typing import Optional
+
from ._util import to_string
from .document import Document
@@ -9,11 +11,19 @@ class Result:
"""
def __init__(
- self, res, hascontent, duration=0, has_payload=False, with_scores=False
+ self,
+ res,
+ hascontent,
+ duration=0,
+ has_payload=False,
+ with_scores=False,
+ field_encodings: Optional[dict] = None,
):
"""
- - **snippets**: An optional dictionary of the form
- {field: snippet_size} for snippet formatting
+ - duration: the execution time of the query
+ - has_payload: whether the query has payloads
+ - with_scores: whether the query has scores
+ - field_encodings: an optional mapping from field name to the encoding
+ used when decoding that field
"""
self.total = res[0]
@@ -39,18 +49,22 @@ def __init__(
fields = {}
if hascontent and res[i + fields_offset] is not None:
- fields = (
- dict(
- dict(
- zip(
- map(to_string, res[i + fields_offset][::2]),
- map(to_string, res[i + fields_offset][1::2]),
- )
- )
- )
- if hascontent
- else {}
- )
+ keys = map(to_string, res[i + fields_offset][::2])
+ values = res[i + fields_offset][1::2]
+
+ for key, value in zip(keys, values):
+ if field_encodings is None or key not in field_encodings:
+ fields[key] = to_string(value)
+ continue
+
+ encoding = field_encodings[key]
+
+ # If the encoding is None, we don't need to decode the value
+ if encoding is None:
+ fields[key] = value
+ else:
+ fields[key] = to_string(value, encoding=encoding)
+
try:
del fields["id"]
except KeyError:
diff --git a/redis/commands/timeseries/commands.py b/redis/commands/timeseries/commands.py
index ad137f2df0..4adccd61ac 100644
--- a/redis/commands/timeseries/commands.py
+++ b/redis/commands/timeseries/commands.py
@@ -33,44 +33,67 @@ def create(
labels: Optional[Dict[str, str]] = None,
chunk_size: Optional[int] = None,
duplicate_policy: Optional[str] = None,
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
):
"""
Create a new time-series.
- Args:
+ For more information see https://redis.io/commands/ts.create/
- key:
- time-series key
- retention_msecs:
- Maximum age for samples compared to highest reported timestamp (in milliseconds).
- If None or 0 is passed then the series is not trimmed at all.
- uncompressed:
- Changes data storage from compressed (by default) to uncompressed
- labels:
- Set of label-value pairs that represent metadata labels of the key.
- chunk_size:
- Memory size, in bytes, allocated for each data chunk.
- Must be a multiple of 8 in the range [128 .. 1048576].
- duplicate_policy:
- Policy for handling multiple samples with identical timestamps.
- Can be one of:
- - 'block': an error will occur for any out of order sample.
- - 'first': ignore the new value.
- - 'last': override with latest value.
- - 'min': only override if the value is lower than the existing value.
- - 'max': only override if the value is higher than the existing value.
- - 'sum': If a previous sample exists, add the new sample to it so that \
- the updated value is equal to (previous + new). If no previous sample \
- exists, set the updated value equal to the new value.
-
- For more information: https://redis.io/commands/ts.create/
- """ # noqa
+ Args:
+ key:
+ The time-series key.
+ retention_msecs:
+ Maximum age for samples, compared to the highest reported timestamp in
+ milliseconds. If `None` or `0` is passed, the series is not trimmed at
+ all.
+ uncompressed:
+ Changes data storage from compressed (default) to uncompressed.
+ labels:
+ A dictionary of label-value pairs that represent metadata labels of the
+ key.
+ chunk_size:
+ Memory size, in bytes, allocated for each data chunk. Must be a multiple
+ of 8 in the range `[48..1048576]`. In earlier versions of the module the
+ minimum value was different.
+ duplicate_policy:
+ Policy for handling multiple samples with identical timestamps. Can be
+ one of:
+ - 'block': An error will occur and the new value will be ignored.
+ - 'first': Ignore the new value.
+ - 'last': Override with the latest value.
+ - 'min': Only override if the value is lower than the existing
+ value.
+ - 'max': Only override if the value is higher than the existing
+ value.
+ - 'sum': If a previous sample exists, add the new sample to it so
+ that the updated value is equal to (previous + new). If no
+ previous sample exists, set the updated value equal to the new
+ value.
+ ignore_max_time_diff:
+ A non-negative integer value, in milliseconds, that sets an ignore
+ threshold for added timestamps. If the difference between the last
+ timestamp and the new timestamp is lower than this threshold, the new
+ entry is ignored. Only applicable if `duplicate_policy` is set to
+ `last`, and if `ignore_max_val_diff` is also set. Available since
+ RedisTimeSeries version 1.12.0.
+ ignore_max_val_diff:
+ A non-negative floating point value that sets an ignore threshold for
+ added values. If the difference between the last value and the new value
+ is lower than this threshold, the new entry is ignored. Only applicable
+ if `duplicate_policy` is set to `last`, and if `ignore_max_time_diff` is
+ also set. Available since RedisTimeSeries version 1.12.0.
+ """
params = [key]
self._append_retention(params, retention_msecs)
self._append_uncompressed(params, uncompressed)
self._append_chunk_size(params, chunk_size)
- self._append_duplicate_policy(params, CREATE_CMD, duplicate_policy)
+ self._append_duplicate_policy(params, duplicate_policy)
self._append_labels(params, labels)
+ self._append_insertion_filters(
+ params, ignore_max_time_diff, ignore_max_val_diff
+ )
return self.execute_command(CREATE_CMD, *params)
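
A sketch of the new insertion filters (key name is illustrative; assumes RedisTimeSeries 1.12 or later and the `r` client from the earlier sketches):

    ts = r.ts()

    # Drop near-duplicate samples: anything within 5 s and 0.1 of the
    # last accepted sample is ignored.
    ts.create(
        "sensor:temp",
        duplicate_policy="last",     # required for the IGNORE filters
        ignore_max_time_diff=5000,   # milliseconds
        ignore_max_val_diff=0.1,
    )
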
@@ -81,42 +104,65 @@ def alter(
labels: Optional[Dict[str, str]] = None,
chunk_size: Optional[int] = None,
duplicate_policy: Optional[str] = None,
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
):
"""
- Update the retention, chunk size, duplicate policy, and labels of an existing
- time series.
+ Update an existing time series.
- Args:
+ For more information see https://redis.io/commands/ts.alter/
- key:
- time-series key
- retention_msecs:
- Maximum retention period, compared to maximal existing timestamp (in milliseconds).
- If None or 0 is passed then the series is not trimmed at all.
- labels:
- Set of label-value pairs that represent metadata labels of the key.
- chunk_size:
- Memory size, in bytes, allocated for each data chunk.
- Must be a multiple of 8 in the range [128 .. 1048576].
- duplicate_policy:
- Policy for handling multiple samples with identical timestamps.
- Can be one of:
- - 'block': an error will occur for any out of order sample.
- - 'first': ignore the new value.
- - 'last': override with latest value.
- - 'min': only override if the value is lower than the existing value.
- - 'max': only override if the value is higher than the existing value.
- - 'sum': If a previous sample exists, add the new sample to it so that \
- the updated value is equal to (previous + new). If no previous sample \
- exists, set the updated value equal to the new value.
-
- For more information: https://redis.io/commands/ts.alter/
- """ # noqa
+ Args:
+ key:
+ The time-series key.
+ retention_msecs:
+ Maximum age for samples, compared to the highest reported timestamp in
+ milliseconds. If `None` or `0` is passed, the series is not trimmed at
+ all.
+ labels:
+ A dictionary of label-value pairs that represent metadata labels of the
+ key.
+ chunk_size:
+ Memory size, in bytes, allocated for each data chunk. Must be a multiple
+ of 8 in the range `[48..1048576]`. In earlier versions of the module the
+ minimum value was different. Changing this value does not affect
+ existing chunks.
+ duplicate_policy:
+ Policy for handling multiple samples with identical timestamps. Can be
+ one of:
+ - 'block': An error will occur and the new value will be ignored.
+ - 'first': Ignore the new value.
+ - 'last': Override with the latest value.
+ - 'min': Only override if the value is lower than the existing
+ value.
+ - 'max': Only override if the value is higher than the existing
+ value.
+ - 'sum': If a previous sample exists, add the new sample to it so
+ that the updated value is equal to (previous + new). If no
+ previous sample exists, set the updated value equal to the new
+ value.
+ ignore_max_time_diff:
+ A non-negative integer value, in milliseconds, that sets an ignore
+ threshold for added timestamps. If the difference between the last
+ timestamp and the new timestamp is lower than this threshold, the new
+ entry is ignored. Only applicable if `duplicate_policy` is set to
+ `last`, and if `ignore_max_val_diff` is also set. Available since
+ RedisTimeSeries version 1.12.0.
+ ignore_max_val_diff:
+ A non-negative floating point value that sets an ignore threshold for
+ added values. If the difference between the last value and the new value
+ is lower than this threshold, the new entry is ignored. Only applicable
+ if `duplicate_policy` is set to `last`, and if `ignore_max_time_diff` is
+ also set. Available since RedisTimeSeries version 1.12.0.
+ """
params = [key]
self._append_retention(params, retention_msecs)
self._append_chunk_size(params, chunk_size)
- self._append_duplicate_policy(params, ALTER_CMD, duplicate_policy)
+ self._append_duplicate_policy(params, duplicate_policy)
self._append_labels(params, labels)
+ self._append_insertion_filters(
+ params, ignore_max_time_diff, ignore_max_val_diff
+ )
return self.execute_command(ALTER_CMD, *params)
@@ -130,60 +176,104 @@ def add(
labels: Optional[Dict[str, str]] = None,
chunk_size: Optional[int] = None,
duplicate_policy: Optional[str] = None,
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
+ on_duplicate: Optional[str] = None,
):
"""
- Append (or create and append) a new sample to a time series.
+ Append a sample to a time series. When the specified key does not exist, a new
+ time series is created.
- Args:
+ For more information see https://redis.io/commands/ts.add/
- key:
- time-series key
- timestamp:
- Timestamp of the sample. * can be used for automatic timestamp (using the system clock).
- value:
- Numeric data value of the sample
- retention_msecs:
- Maximum retention period, compared to maximal existing timestamp (in milliseconds).
- If None or 0 is passed then the series is not trimmed at all.
- uncompressed:
- Changes data storage from compressed (by default) to uncompressed
- labels:
- Set of label-value pairs that represent metadata labels of the key.
- chunk_size:
- Memory size, in bytes, allocated for each data chunk.
- Must be a multiple of 8 in the range [128 .. 1048576].
- duplicate_policy:
- Policy for handling multiple samples with identical timestamps.
- Can be one of:
- - 'block': an error will occur for any out of order sample.
- - 'first': ignore the new value.
- - 'last': override with latest value.
- - 'min': only override if the value is lower than the existing value.
- - 'max': only override if the value is higher than the existing value.
- - 'sum': If a previous sample exists, add the new sample to it so that \
- the updated value is equal to (previous + new). If no previous sample \
- exists, set the updated value equal to the new value.
-
- For more information: https://redis.io/commands/ts.add/
- """ # noqa
+ Args:
+ key:
+ The time-series key.
+ timestamp:
+ Timestamp of the sample. `*` can be used for automatic timestamp (using
+ the system clock).
+ value:
+ Numeric data value of the sample.
+ retention_msecs:
+ Maximum age for samples, compared to the highest reported timestamp in
+ milliseconds. If `None` or `0` is passed, the series is not trimmed at
+ all.
+ uncompressed:
+ Changes data storage from compressed (default) to uncompressed.
+ labels:
+ A dictionary of label-value pairs that represent metadata labels of the
+ key.
+ chunk_size:
+ Memory size, in bytes, allocated for each data chunk. Must be a multiple
+ of 8 in the range `[48..1048576]`. In earlier versions of the module the
+ minimum value was different.
+ duplicate_policy:
+ Policy for handling multiple samples with identical timestamps. Can be
+ one of:
+ - 'block': An error will occur and the new value will be ignored.
+ - 'first': Ignore the new value.
+ - 'last': Override with the latest value.
+ - 'min': Only override if the value is lower than the existing
+ value.
+ - 'max': Only override if the value is higher than the existing
+ value.
+ - 'sum': If a previous sample exists, add the new sample to it so
+ that the updated value is equal to (previous + new). If no
+ previous sample exists, set the updated value equal to the new
+ value.
+ ignore_max_time_diff:
+ A non-negative integer value, in milliseconds, that sets an ignore
+ threshold for added timestamps. If the difference between the last
+ timestamp and the new timestamp is lower than this threshold, the new
+ entry is ignored. Only applicable if `duplicate_policy` is set to
+ `last`, and if `ignore_max_val_diff` is also set. Available since
+ RedisTimeSeries version 1.12.0.
+ ignore_max_val_diff:
+ A non-negative floating point value that sets an ignore threshold for
+ added values. If the difference between the last value and the new value
+ is lower than this threshold, the new entry is ignored. Only applicable
+ if `duplicate_policy` is set to `last`, and if `ignore_max_time_diff` is
+ also set. Available since RedisTimeSeries version 1.12.0.
+ on_duplicate:
+ Use a specific duplicate policy for the specified timestamp. Overrides
+ the duplicate policy set by `duplicate_policy`.
+ """
params = [key, timestamp, value]
self._append_retention(params, retention_msecs)
self._append_uncompressed(params, uncompressed)
self._append_chunk_size(params, chunk_size)
- self._append_duplicate_policy(params, ADD_CMD, duplicate_policy)
+ self._append_duplicate_policy(params, duplicate_policy)
self._append_labels(params, labels)
+ self._append_insertion_filters(
+ params, ignore_max_time_diff, ignore_max_val_diff
+ )
+ self._append_on_duplicate(params, on_duplicate)
return self.execute_command(ADD_CMD, *params)
def madd(self, ktv_tuples: List[Tuple[KeyT, Union[int, str], Number]]):
"""
- Append (or create and append) a new `value` to series
- `key` with `timestamp`.
- Expects a list of `tuples` as (`key`,`timestamp`, `value`).
- Return value is an array with timestamps of insertions.
+ Append new samples to one or more time series.
+
+ Each time series must already exist.
+
+ The method expects a list of tuples. Each tuple should contain three elements:
+ (`key`, `timestamp`, `value`). The `value` will be appended to the time series
+ identified by `key`, at the given `timestamp`.
- For more information: https://redis.io/commands/ts.madd/
- """ # noqa
+ For more information see https://redis.io/commands/ts.madd/
+
+ Args:
+ ktv_tuples:
+ A list of tuples, where each tuple contains:
+ - `key`: The key of the time series.
+ - `timestamp`: The timestamp at which the value should be appended.
+ - `value`: The value to append to the time series.
+
+ Returns:
+ A list that contains, for each sample, either the timestamp that was used,
+ or an error, if the sample could not be added.
+ """
params = []
for ktv in ktv_tuples:
params.extend(ktv)
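
Usage sketch, continuing with the `ts` handle from above:

    # Both series must already exist.
    ts.madd([
        ("sensor:temp", 1717000000000, 21.5),
        ("sensor:temp", 1717000060000, 21.7),
    ])
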
@@ -199,37 +289,86 @@ def incrby(
uncompressed: Optional[bool] = False,
labels: Optional[Dict[str, str]] = None,
chunk_size: Optional[int] = None,
+ duplicate_policy: Optional[str] = None,
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
):
"""
- Increment (or create an time-series and increment) the latest sample's of a series.
- This command can be used as a counter or gauge that automatically gets history as a time series.
+ Increment the value of the latest sample of a series. When the specified
+ key does not exist, a new time series is created.
- Args:
+ This command can be used as a counter or gauge that automatically gets history
+ as a time series.
+
+ For more information see https://redis.io/commands/ts.incrby/
- key:
- time-series key
- value:
- Numeric data value of the sample
- timestamp:
- Timestamp of the sample. * can be used for automatic timestamp (using the system clock).
- retention_msecs:
- Maximum age for samples compared to last event time (in milliseconds).
- If None or 0 is passed then the series is not trimmed at all.
- uncompressed:
- Changes data storage from compressed (by default) to uncompressed
- labels:
- Set of label-value pairs that represent metadata labels of the key.
- chunk_size:
- Memory size, in bytes, allocated for each data chunk.
-
- For more information: https://redis.io/commands/ts.incrby/
- """ # noqa
+ Args:
+ key:
+ The time-series key.
+ value:
+ Numeric value to be added (addend).
+ timestamp:
+ Timestamp of the sample. `*` can be used for automatic timestamp (using
+ the system clock). `timestamp` must be equal to or higher than the
+ maximum existing timestamp in the series. When equal, the value of the
+ sample with the maximum existing timestamp is increased. If it is
+ higher, a new sample with a timestamp set to `timestamp` is created, and
+ its value is set to the value of the sample with the maximum existing
+ timestamp plus the addend.
+ retention_msecs:
+ Maximum age for samples, compared to the highest reported timestamp in
+ milliseconds. If `None` or `0` is passed, the series is not trimmed at
+ all.
+ uncompressed:
+ Changes data storage from compressed (default) to uncompressed.
+ labels:
+ A dictionary of label-value pairs that represent metadata labels of the
+ key.
+ chunk_size:
+ Memory size, in bytes, allocated for each data chunk. Must be a multiple
+ of 8 in the range `[48..1048576]`. In earlier versions of the module the
+ minimum value was different.
+ duplicate_policy:
+ Policy for handling multiple samples with identical timestamps. Can be
+ one of:
+ - 'block': An error will occur and the new value will be ignored.
+ - 'first': Ignore the new value.
+ - 'last': Override with the latest value.
+ - 'min': Only override if the value is lower than the existing
+ value.
+ - 'max': Only override if the value is higher than the existing
+ value.
+ - 'sum': If a previous sample exists, add the new sample to it so
+ that the updated value is equal to (previous + new). If no
+ previous sample exists, set the updated value equal to the new
+ value.
+ ignore_max_time_diff:
+ A non-negative integer value, in milliseconds, that sets an ignore
+ threshold for added timestamps. If the difference between the last
+ timestamp and the new timestamp is lower than this threshold, the new
+ entry is ignored. Only applicable if `duplicate_policy` is set to
+ `last`, and if `ignore_max_val_diff` is also set. Available since
+ RedisTimeSeries version 1.12.0.
+ ignore_max_val_diff:
+ A non-negative floating point value that sets an ignore threshold for
+ added values. If the difference between the last value and the new value
+ is lower than this threshold, the new entry is ignored. Only applicable
+ if `duplicate_policy` is set to `last`, and if `ignore_max_time_diff` is
+ also set. Available since RedisTimeSeries version 1.12.0.
+
+ Returns:
+ The timestamp of the sample that was modified or added.
+ """
params = [key, value]
self._append_timestamp(params, timestamp)
self._append_retention(params, retention_msecs)
self._append_uncompressed(params, uncompressed)
self._append_chunk_size(params, chunk_size)
+ self._append_duplicate_policy(params, duplicate_policy)
self._append_labels(params, labels)
+ self._append_insertion_filters(
+ params, ignore_max_time_diff, ignore_max_val_diff
+ )
return self.execute_command(INCRBY_CMD, *params)
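
A counter-style sketch of `incrby`, continuing with the `ts` handle from above:

    # Auto-timestamped counter: adds 1 at the current server time and
    # returns the timestamp of the affected sample.
    ts.incrby("page:hits", 1, timestamp="*")
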
@@ -242,37 +381,86 @@ def decrby(
uncompressed: Optional[bool] = False,
labels: Optional[Dict[str, str]] = None,
chunk_size: Optional[int] = None,
+ duplicate_policy: Optional[str] = None,
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
):
"""
- Decrement (or create an time-series and decrement) the latest sample's of a series.
- This command can be used as a counter or gauge that automatically gets history as a time series.
+ Decrement the value of the latest sample of a series. When the specified
+ key does not exist, a new time series is created.
- Args:
+ This command can be used as a counter or gauge that automatically gets history
+ as a time series.
- key:
- time-series key
- value:
- Numeric data value of the sample
- timestamp:
- Timestamp of the sample. * can be used for automatic timestamp (using the system clock).
- retention_msecs:
- Maximum age for samples compared to last event time (in milliseconds).
- If None or 0 is passed then the series is not trimmed at all.
- uncompressed:
- Changes data storage from compressed (by default) to uncompressed
- labels:
- Set of label-value pairs that represent metadata labels of the key.
- chunk_size:
- Memory size, in bytes, allocated for each data chunk.
-
- For more information: https://redis.io/commands/ts.decrby/
- """ # noqa
+ For more information see https://redis.io/commands/ts.decrby/
+
+ Args:
+ key:
+ The time-series key.
+ value:
+ Numeric value to subtract (subtrahend).
+ timestamp:
+ Timestamp of the sample. `*` can be used for automatic timestamp (using
+ the system clock). `timestamp` must be equal to or higher than the
+ maximum existing timestamp in the series. When equal, the value of the
+ sample with the maximum existing timestamp is decreased. If it is
+ higher, a new sample with a timestamp set to `timestamp` is created, and
+ its value is set to the value of the sample with the maximum existing
+ timestamp minus the subtrahend.
+ retention_msecs:
+ Maximum age for samples, compared to the highest reported timestamp in
+ milliseconds. If `None` or `0` is passed, the series is not trimmed at
+ all.
+ uncompressed:
+ Changes data storage from compressed (default) to uncompressed.
+ labels:
+ A dictionary of label-value pairs that represent metadata labels of the
+ key.
+ chunk_size:
+ Memory size, in bytes, allocated for each data chunk. Must be a multiple
+ of 8 in the range `[48..1048576]`. In earlier versions of the module the
+ minimum value was different.
+ duplicate_policy:
+ Policy for handling multiple samples with identical timestamps. Can be
+ one of:
+ - 'block': An error will occur and the new value will be ignored.
+ - 'first': Ignore the new value.
+ - 'last': Override with the latest value.
+ - 'min': Only override if the value is lower than the existing
+ value.
+ - 'max': Only override if the value is higher than the existing
+ value.
+ - 'sum': If a previous sample exists, add the new sample to it so
+ that the updated value is equal to (previous + new). If no
+ previous sample exists, set the updated value equal to the new
+ value.
+ ignore_max_time_diff:
+ A non-negative integer value, in milliseconds, that sets an ignore
+ threshold for added timestamps. If the difference between the last
+ timestamp and the new timestamp is lower than this threshold, the new
+ entry is ignored. Only applicable if `duplicate_policy` is set to
+ `last`, and if `ignore_max_val_diff` is also set. Available since
+ RedisTimeSeries version 1.12.0.
+ ignore_max_val_diff:
+ A non-negative floating point value that sets an ignore threshold for
+ added values. If the difference between the last value and the new value
+ is lower than this threshold, the new entry is ignored. Only applicable
+ if `duplicate_policy` is set to `last`, and if `ignore_max_time_diff` is
+ also set. Available since RedisTimeSeries version 1.12.0.
+
+ Returns:
+ The timestamp of the sample that was modified or added.
+ """
params = [key, value]
self._append_timestamp(params, timestamp)
self._append_retention(params, retention_msecs)
self._append_uncompressed(params, uncompressed)
self._append_chunk_size(params, chunk_size)
+ self._append_duplicate_policy(params, duplicate_policy)
self._append_labels(params, labels)
+ self._append_insertion_filters(
+ params, ignore_max_time_diff, ignore_max_val_diff
+ )
return self.execute_command(DECRBY_CMD, *params)
@@ -280,17 +468,22 @@ def delete(self, key: KeyT, from_time: int, to_time: int):
"""
Delete all samples between two timestamps for a given time series.
- Args:
+ The given timestamp interval is closed (inclusive), meaning that samples whose
+ timestamp equals `from_time` or `to_time` are also deleted.
- key:
- time-series key.
- from_time:
- Start timestamp for the range deletion.
- to_time:
- End timestamp for the range deletion.
+ For more information see https://redis.io/commands/ts.del/
- For more information: https://redis.io/commands/ts.del/
- """ # noqa
+ Args:
+ key:
+ The time-series key.
+ from_time:
+ Start timestamp for the range deletion.
+ to_time:
+ End timestamp for the range deletion.
+
+ Returns:
+ The number of samples deleted.
+ """
return self.execute_command(DEL_CMD, key, from_time, to_time)
def createrule(
@@ -304,24 +497,23 @@ def createrule(
"""
Create a compaction rule from values added to `source_key` into `dest_key`.
- Args:
+ For more information see https://redis.io/commands/ts.createrule/
- source_key:
- Key name for source time series
- dest_key:
- Key name for destination (compacted) time series
- aggregation_type:
- Aggregation type: One of the following:
- [`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`, `std.p`,
- `std.s`, `var.p`, `var.s`, `twa`]
- bucket_size_msec:
- Duration of each bucket, in milliseconds
- align_timestamp:
- Assure that there is a bucket that starts at exactly align_timestamp and
- align all other buckets accordingly.
-
- For more information: https://redis.io/commands/ts.createrule/
- """ # noqa
+ Args:
+ source_key:
+ Key name for source time series.
+ dest_key:
+ Key name for destination (compacted) time series.
+ aggregation_type:
+ Aggregation type: One of the following:
+ [`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`, `std.p`,
+ `std.s`, `var.p`, `var.s`, `twa`]
+ bucket_size_msec:
+ Duration of each bucket, in milliseconds.
+ align_timestamp:
+ Ensure that there is a bucket that starts at exactly `align_timestamp`,
+ and align all other buckets accordingly.
+ """
params = [source_key, dest_key]
self._append_aggregation(params, aggregation_type, bucket_size_msec)
if align_timestamp is not None:
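
A compaction sketch, continuing with the `ts` handle from above (the destination series must exist before the rule is created):

    # Compact raw samples into one-minute averages.
    ts.create("sensor:temp:avg")
    ts.createrule("sensor:temp", "sensor:temp:avg", "avg", 60000)
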
@@ -331,10 +523,10 @@ def createrule(
def deleterule(self, source_key: KeyT, dest_key: KeyT):
"""
- Delete a compaction rule from `source_key` to `dest_key`..
+ Delete a compaction rule from `source_key` to `dest_key`.
- For more information: https://redis.io/commands/ts.deleterule/
- """ # noqa
+ For more information see https://redis.io/commands/ts.deleterule/
+ """
return self.execute_command(DELETERULE_CMD, source_key, dest_key)
def __range_params(
@@ -383,42 +575,46 @@ def range(
empty: Optional[bool] = False,
):
"""
- Query a range in forward direction for a specific time-serie.
+ Query a range in forward direction for a specific time-series.
- Args:
+ For more information see https://redis.io/commands/ts.range/
- key:
- Key name for timeseries.
- from_time:
- Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
- to_time:
- End timestamp for range query, + can be used to express the maximum possible timestamp.
- count:
- Limits the number of returned samples.
- aggregation_type:
- Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
- `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
- bucket_size_msec:
- Time bucket for aggregation in milliseconds.
- filter_by_ts:
- List of timestamps to filter the result by specific timestamps.
- filter_by_min_value:
- Filter result by minimum value (must mention also filter by_max_value).
- filter_by_max_value:
- Filter result by maximum value (must mention also filter by_min_value).
- align:
- Timestamp for alignment control for aggregation.
- latest:
- Used when a time series is a compaction, reports the compacted value of the
- latest possibly partial bucket
- bucket_timestamp:
- Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
- `high`, `~`, `mid`].
- empty:
- Reports aggregations for empty buckets.
-
- For more information: https://redis.io/commands/ts.range/
- """ # noqa
+ Args:
+ key:
+ Key name for timeseries.
+ from_time:
+ Start timestamp for the range query. `-` can be used to express the
+ minimum possible timestamp (0).
+ to_time:
+ End timestamp for range query, `+` can be used to express the maximum
+ possible timestamp.
+ count:
+ Limits the number of returned samples.
+ aggregation_type:
+ Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+ `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`,
+ `twa`]
+ bucket_size_msec:
+ Time bucket for aggregation in milliseconds.
+ filter_by_ts:
+ List of timestamps to filter the result by specific timestamps.
+ filter_by_min_value:
+ Filter result by minimum value (must also be used with
+ `filter_by_max_value`).
+ filter_by_max_value:
+ Filter result by maximum value (must also be used with
+ `filter_by_min_value`).
+ align:
+ Timestamp for alignment control for aggregation.
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest possibly partial bucket.
+ bucket_timestamp:
+ Controls how bucket timestamps are reported. Can be one of [`-`, `low`,
+ `+`, `high`, `~`, `mid`].
+ empty:
+ Reports aggregations for empty buckets.
+ """
params = self.__range_params(
key,
from_time,
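
A query sketch combining aggregation with the value filters (values are illustrative), continuing with the `ts` handle from above:

    # Hourly averages, considering only samples whose value is in [10, 30].
    ts.range(
        "sensor:temp",
        from_time="-",
        to_time="+",
        aggregation_type="avg",
        bucket_size_msec=3600000,
        filter_by_min_value=10,
        filter_by_max_value=30,
    )
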
@@ -457,40 +653,44 @@ def revrange(
**Note**: This command is only available since RedisTimeSeries >= v1.4
- Args:
+ For more information see https://redis.io/commands/ts.revrange/
- key:
- Key name for timeseries.
- from_time:
- Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
- to_time:
- End timestamp for range query, + can be used to express the maximum possible timestamp.
- count:
- Limits the number of returned samples.
- aggregation_type:
- Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
- `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
- bucket_size_msec:
- Time bucket for aggregation in milliseconds.
- filter_by_ts:
- List of timestamps to filter the result by specific timestamps.
- filter_by_min_value:
- Filter result by minimum value (must mention also filter_by_max_value).
- filter_by_max_value:
- Filter result by maximum value (must mention also filter_by_min_value).
- align:
- Timestamp for alignment control for aggregation.
- latest:
- Used when a time series is a compaction, reports the compacted value of the
- latest possibly partial bucket
- bucket_timestamp:
- Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
- `high`, `~`, `mid`].
- empty:
- Reports aggregations for empty buckets.
-
- For more information: https://redis.io/commands/ts.revrange/
- """ # noqa
+ Args:
+ key:
+ Key name for timeseries.
+ from_time:
+ Start timestamp for the range query. `-` can be used to express the
+ minimum possible timestamp (0).
+ to_time:
+ End timestamp for range query, `+` can be used to express the maximum
+ possible timestamp.
+ count:
+ Limits the number of returned samples.
+ aggregation_type:
+ Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+ `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`,
+ `twa`]
+ bucket_size_msec:
+ Time bucket for aggregation in milliseconds.
+ filter_by_ts:
+ List of timestamps to filter the result by specific timestamps.
+ filter_by_min_value:
+                Filter result by minimum value (must be used together with
+                `filter_by_max_value`).
+ filter_by_max_value:
+                Filter result by maximum value (must be used together with
+                `filter_by_min_value`).
+ align:
+ Timestamp for alignment control for aggregation.
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest possibly partial bucket.
+ bucket_timestamp:
+ Controls how bucket timestamps are reported. Can be one of [`-`, `low`,
+ `+`, `high`, `~`, `mid`].
+ empty:
+ Reports aggregations for empty buckets.
+ """
params = self.__range_params(
key,
from_time,
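The reverse variant under the same assumptions (RedisTimeSeries loaded, `temp:1` populated as in the sketch above):

    import redis

    r = redis.Redis()
    # newest samples first, capped at two results
    r.ts().revrange("temp:1", "-", "+", count=2)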
@@ -567,49 +767,55 @@ def mrange(
"""
Query a range across multiple time-series by filters in forward direction.
- Args:
+ For more information see https://redis.io/commands/ts.mrange/
- from_time:
- Start timestamp for the range query. `-` can be used to express the minimum possible timestamp (0).
- to_time:
- End timestamp for range query, `+` can be used to express the maximum possible timestamp.
- filters:
- filter to match the time-series labels.
- count:
- Limits the number of returned samples.
- aggregation_type:
- Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
- `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
- bucket_size_msec:
- Time bucket for aggregation in milliseconds.
- with_labels:
- Include in the reply all label-value pairs representing metadata labels of the time series.
- filter_by_ts:
- List of timestamps to filter the result by specific timestamps.
- filter_by_min_value:
- Filter result by minimum value (must mention also filter_by_max_value).
- filter_by_max_value:
- Filter result by maximum value (must mention also filter_by_min_value).
- groupby:
- Grouping by fields the results (must mention also reduce).
- reduce:
- Applying reducer functions on each group. Can be one of [`avg` `sum`, `min`,
- `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
- select_labels:
- Include in the reply only a subset of the key-value pair labels of a series.
- align:
- Timestamp for alignment control for aggregation.
- latest:
- Used when a time series is a compaction, reports the compacted
- value of the latest possibly partial bucket
- bucket_timestamp:
- Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
- `high`, `~`, `mid`].
- empty:
- Reports aggregations for empty buckets.
-
- For more information: https://redis.io/commands/ts.mrange/
- """ # noqa
+ Args:
+ from_time:
+ Start timestamp for the range query. `-` can be used to express the
+ minimum possible timestamp (0).
+ to_time:
+ End timestamp for range query, `+` can be used to express the maximum
+ possible timestamp.
+ filters:
+ Filter to match the time-series labels.
+ count:
+ Limits the number of returned samples.
+ aggregation_type:
+ Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+ `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`,
+ `twa`]
+ bucket_size_msec:
+ Time bucket for aggregation in milliseconds.
+ with_labels:
+ Include in the reply all label-value pairs representing metadata labels
+ of the time series.
+ filter_by_ts:
+ List of timestamps to filter the result by specific timestamps.
+ filter_by_min_value:
+                Filter result by minimum value (must be used together with
+                `filter_by_max_value`).
+ filter_by_max_value:
+                Filter result by maximum value (must be used together with
+                `filter_by_min_value`).
+ groupby:
+                Group the results by the given fields (must be used together with `reduce`).
+ reduce:
+                Apply a reducer function to each group. Can be one of [`avg`, `sum`,
+                `min`, `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
+ select_labels:
+ Include in the reply only a subset of the key-value pair labels of a
+ series.
+ align:
+ Timestamp for alignment control for aggregation.
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest possibly partial bucket.
+ bucket_timestamp:
+ Controls how bucket timestamps are reported. Can be one of [`-`, `low`,
+ `+`, `high`, `~`, `mid`].
+ empty:
+ Reports aggregations for empty buckets.
+ """
params = self.__mrange_params(
aggregation_type,
bucket_size_msec,
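A hedged sketch of the multi-series query; the keys and `city` labels are invented for illustration, and RedisTimeSeries is assumed to be loaded:

    import redis

    r = redis.Redis()
    r.ts().create("temp:tlv", labels={"city": "tlv"})
    r.ts().create("temp:jlm", labels={"city": "jlm"})
    r.ts().add("temp:tlv", 1, 30.0)
    r.ts().add("temp:jlm", 1, 25.0)

    # every series whose `city` label matches, with labels attached
    r.ts().mrange("-", "+", filters=["city=(tlv,jlm)"], with_labels=True)
    # one reduced series per label value
    r.ts().mrange("-", "+", filters=["city=(tlv,jlm)"], groupby="city", reduce="max")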
@@ -655,49 +861,55 @@ def mrevrange(
"""
Query a range across multiple time-series by filters in reverse direction.
- Args:
+ For more information see https://redis.io/commands/ts.mrevrange/
- from_time:
- Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
- to_time:
- End timestamp for range query, + can be used to express the maximum possible timestamp.
- filters:
- Filter to match the time-series labels.
- count:
- Limits the number of returned samples.
- aggregation_type:
- Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
- `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
- bucket_size_msec:
- Time bucket for aggregation in milliseconds.
- with_labels:
- Include in the reply all label-value pairs representing metadata labels of the time series.
- filter_by_ts:
- List of timestamps to filter the result by specific timestamps.
- filter_by_min_value:
- Filter result by minimum value (must mention also filter_by_max_value).
- filter_by_max_value:
- Filter result by maximum value (must mention also filter_by_min_value).
- groupby:
- Grouping by fields the results (must mention also reduce).
- reduce:
- Applying reducer functions on each group. Can be one of [`avg` `sum`, `min`,
- `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
- select_labels:
- Include in the reply only a subset of the key-value pair labels of a series.
- align:
- Timestamp for alignment control for aggregation.
- latest:
- Used when a time series is a compaction, reports the compacted
- value of the latest possibly partial bucket
- bucket_timestamp:
- Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
- `high`, `~`, `mid`].
- empty:
- Reports aggregations for empty buckets.
-
- For more information: https://redis.io/commands/ts.mrevrange/
- """ # noqa
+ Args:
+ from_time:
+                Start timestamp for the range query. `-` can be used to express the
+ minimum possible timestamp (0).
+ to_time:
+                End timestamp for range query, `+` can be used to express the maximum
+ possible timestamp.
+ filters:
+ Filter to match the time-series labels.
+ count:
+ Limits the number of returned samples.
+ aggregation_type:
+ Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+ `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`,
+ `twa`].
+ bucket_size_msec:
+ Time bucket for aggregation in milliseconds.
+ with_labels:
+ Include in the reply all label-value pairs representing metadata labels
+ of the time series.
+ filter_by_ts:
+ List of timestamps to filter the result by specific timestamps.
+ filter_by_min_value:
+                Filter result by minimum value (must be used together with
+                `filter_by_max_value`).
+ filter_by_max_value:
+                Filter result by maximum value (must be used together with
+                `filter_by_min_value`).
+ groupby:
+                Group the results by the given fields (must be used together with `reduce`).
+ reduce:
+                Apply a reducer function to each group. Can be one of [`avg`, `sum`,
+                `min`, `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
+ select_labels:
+ Include in the reply only a subset of the key-value pair labels of a
+ series.
+ align:
+ Timestamp for alignment control for aggregation.
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest possibly partial bucket.
+ bucket_timestamp:
+ Controls how bucket timestamps are reported. Can be one of [`-`, `low`,
+ `+`, `high`, `~`, `mid`].
+ empty:
+ Reports aggregations for empty buckets.
+ """
params = self.__mrange_params(
aggregation_type,
bucket_size_msec,
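And the reverse multi-series variant, under the same assumptions as the `mrange()` sketch:

    import redis

    r = redis.Redis()
    # the newest matching sample per series
    r.ts().mrevrange("-", "+", filters=["city=tlv"], count=1)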
@@ -721,13 +933,16 @@ def mrevrange(
return self.execute_command(MREVRANGE_CMD, *params)
def get(self, key: KeyT, latest: Optional[bool] = False):
- """# noqa
+ """
Get the last sample of `key`.
- `latest` used when a time series is a compaction, reports the compacted
- value of the latest (possibly partial) bucket
- For more information: https://redis.io/commands/ts.get/
- """ # noqa
+ For more information see https://redis.io/commands/ts.get/
+
+ Args:
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest (possibly partial) bucket.
+ """
params = [key]
self._append_latest(params, latest)
return self.execute_command(GET_CMD, *params)
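A short sketch of `get()`; `temp:1` is the hypothetical key from the earlier examples, and the compaction key is likewise assumed:

    import redis

    r = redis.Redis()
    r.ts().get("temp:1")  # (timestamp, value) of the most recent sample
    # for a compacted series, include the still-open bucket
    r.ts().get("temp:1:avg", latest=True)  # hypothetical compaction target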
@@ -739,24 +954,24 @@ def mget(
select_labels: Optional[List[str]] = None,
latest: Optional[bool] = False,
):
- """# noqa
+ """
Get the last samples matching the specific `filter`.
- Args:
+ For more information see https://redis.io/commands/ts.mget/
- filters:
- Filter to match the time-series labels.
- with_labels:
- Include in the reply all label-value pairs representing metadata
- labels of the time series.
- select_labels:
- Include in the reply only a subset of the key-value pair labels of a series.
- latest:
- Used when a time series is a compaction, reports the compacted
- value of the latest possibly partial bucket
-
- For more information: https://redis.io/commands/ts.mget/
- """ # noqa
+ Args:
+ filters:
+ Filter to match the time-series labels.
+ with_labels:
+ Include in the reply all label-value pairs representing metadata labels
+ of the time series.
+ select_labels:
+                Include in the reply only a subset of the key-value pair labels of the
+ time series.
+ latest:
+ Used when a time series is a compaction, reports the compacted value of
+ the latest possibly partial bucket.
+ """
params = []
self._append_latest(params, latest)
self._append_with_labels(params, with_labels, select_labels)
@@ -765,26 +980,26 @@ def mget(
return self.execute_command(MGET_CMD, *params)
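Its multi-key counterpart, assuming series labelled `city=tlv` exist as in the `mrange()` sketch:

    import redis

    r = redis.Redis()
    # last sample of every matching series, labels included
    r.ts().mget(["city=tlv"], with_labels=True)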
def info(self, key: KeyT):
- """# noqa
+ """
Get information of `key`.
- For more information: https://redis.io/commands/ts.info/
- """ # noqa
+ For more information see https://redis.io/commands/ts.info/
+ """
return self.execute_command(INFO_CMD, key)
def queryindex(self, filters: List[str]):
- """# noqa
+ """
Get all time series keys matching the `filter` list.
- For more information: https://redis.io/commands/ts.queryindex/
- """ # noq
+ For more information see https://redis.io/commands/ts.queryindex/
+ """
return self.execute_command(QUERYINDEX_CMD, *filters)
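For completeness, a sketch of the two inspection commands, reusing the assumed `temp:tlv` series:

    import redis

    r = redis.Redis()
    info = r.ts().info("temp:tlv")
    print(info.labels, info.rules)  # metadata carried by TSInfo
    # keys whose labels match the filter expression
    print(r.ts().queryindex(["city=tlv"]))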
@staticmethod
def _append_uncompressed(params: List[str], uncompressed: Optional[bool]):
"""Append UNCOMPRESSED tag to params."""
if uncompressed:
- params.extend(["UNCOMPRESSED"])
+ params.extend(["ENCODING", "UNCOMPRESSED"])
@staticmethod
def _append_with_labels(
@@ -860,17 +1075,16 @@ def _append_chunk_size(params: List[str], chunk_size: Optional[int]):
params.extend(["CHUNK_SIZE", chunk_size])
@staticmethod
- def _append_duplicate_policy(
- params: List[str], command: Optional[str], duplicate_policy: Optional[str]
- ):
- """Append DUPLICATE_POLICY property to params on CREATE
- and ON_DUPLICATE on ADD.
- """
+ def _append_duplicate_policy(params: List[str], duplicate_policy: Optional[str]):
+ """Append DUPLICATE_POLICY property to params."""
if duplicate_policy is not None:
- if command == "TS.ADD":
- params.extend(["ON_DUPLICATE", duplicate_policy])
- else:
- params.extend(["DUPLICATE_POLICY", duplicate_policy])
+ params.extend(["DUPLICATE_POLICY", duplicate_policy])
+
+ @staticmethod
+ def _append_on_duplicate(params: List[str], on_duplicate: Optional[str]):
+ """Append ON_DUPLICATE property to params."""
+ if on_duplicate is not None:
+ params.extend(["ON_DUPLICATE", on_duplicate])
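With the helper split above, the two clauses are appended independently. A sketch of the intended surface behavior, assuming `create()` forwards `duplicate_policy` and `add()` forwards `on_duplicate` (which is what the new helpers imply):

    import redis

    r = redis.Redis()
    # stored with the key: TS.CREATE ... DUPLICATE_POLICY last
    r.ts().create("dup:demo", duplicate_policy="last")
    # per-call override: TS.ADD ... ON_DUPLICATE sum
    r.ts().add("dup:demo", 1, 2.5, on_duplicate="sum")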
@staticmethod
def _append_filer_by_ts(params: List[str], ts_list: Optional[List[int]]):
@@ -903,3 +1117,20 @@ def _append_empty(params: List[str], empty: Optional[bool]):
"""Append EMPTY property to params."""
if empty:
params.append("EMPTY")
+
+ @staticmethod
+ def _append_insertion_filters(
+ params: List[str],
+ ignore_max_time_diff: Optional[int] = None,
+ ignore_max_val_diff: Optional[Number] = None,
+ ):
+ """Append insertion filters to params."""
+ if (ignore_max_time_diff is None) != (ignore_max_val_diff is None):
+ raise ValueError(
+ "Both ignore_max_time_diff and ignore_max_val_diff must be set."
+ )
+
+ if ignore_max_time_diff is not None and ignore_max_val_diff is not None:
+ params.extend(
+ ["IGNORE", str(ignore_max_time_diff), str(ignore_max_val_diff)]
+ )
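Assuming the public `create()`/`add()` methods forward these two arguments (their plumbing is outside this hunk), the insertion filter would be used roughly like this; passing only one of the pair raises the `ValueError` above:

    import redis

    r = redis.Redis()
    # skip samples within 5 s and within 10.0 of the last stored value:
    # TS.CREATE ... IGNORE 5000 10.0  (assumes create() exposes these kwargs)
    r.ts().create("sensor:1", ignore_max_time_diff=5000, ignore_max_val_diff=10.0)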
diff --git a/redis/commands/timeseries/info.py b/redis/commands/timeseries/info.py
index 3a384dc049..d86b92ace2 100644
--- a/redis/commands/timeseries/info.py
+++ b/redis/commands/timeseries/info.py
@@ -6,7 +6,7 @@ class TSInfo:
"""
Hold information and statistics on the time-series.
Can be created using ``tsinfo`` command
- https://oss.redis.com/redistimeseries/commands/#tsinfo.
+ https://redis.io/docs/latest/commands/ts.info/
"""
rules = []
@@ -57,7 +57,7 @@ def __init__(self, args):
Policy that will define handling of duplicate samples.
Can read more about on
- https://oss.redis.com/redistimeseries/configuration/#duplicate_policy
+ https://redis.io/docs/latest/develop/data-types/timeseries/configuration/#duplicate_policy
"""
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.rules = response.get("rules")
diff --git a/redis/connection.py b/redis/connection.py
index 8645656df3..4a1b8ec85d 100644
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -31,6 +31,7 @@
HIREDIS_AVAILABLE,
HIREDIS_PACK_AVAILABLE,
SSL_AVAILABLE,
+ format_error_message,
get_lib_version,
str_if_bytes,
)
@@ -217,7 +218,7 @@ def __init__(
def __repr__(self):
repr_args = ",".join([f"{k}={v}" for k, v in self.repr_pieces()])
- return f"{self.__class__.__name__}<{repr_args}>"
+ return f"<{self.__class__.__module__}.{self.__class__.__name__}({repr_args})>"
@abstractmethod
def repr_pieces(self):
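The new repr is constructor-style and fully qualified. A sketch of the expected output, assuming the default `repr_pieces()` of a plain TCP connection:

    import redis

    conn = redis.Connection(host="localhost", port=6379, db=0)
    print(repr(conn))
    # <redis.connection.Connection(host=localhost,port=6379,db=0)>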
@@ -311,9 +312,8 @@ def _connect(self):
def _host_error(self):
pass
- @abstractmethod
def _error_message(self, exception):
- pass
+ return format_error_message(self._host_error(), exception)
def on_connect(self):
"Initialize the connection, authenticate and select a database"
@@ -642,27 +642,6 @@ def _connect(self):
def _host_error(self):
return f"{self.host}:{self.port}"
- def _error_message(self, exception):
- # args for socket.error can either be (errno, "message")
- # or just "message"
-
- host_error = self._host_error()
-
- if len(exception.args) == 1:
- try:
- return f"Error connecting to {host_error}. \
- {exception.args[0]}."
- except AttributeError:
- return f"Connection Error: {exception.args[0]}"
- else:
- try:
- return (
- f"Error {exception.args[0]} connecting to "
- f"{host_error}. {exception.args[1]}."
- )
- except AttributeError:
- return f"Connection Error: {exception.args[0]}"
-
class SSLConnection(Connection):
"""Manages SSL connections to and from the Redis server(s).
@@ -839,20 +818,6 @@ def _connect(self):
def _host_error(self):
return self.path
- def _error_message(self, exception):
- # args for socket.error can either be (errno, "message")
- # or just "message"
- host_error = self._host_error()
- if len(exception.args) == 1:
- return (
- f"Error connecting to unix socket: {host_error}. {exception.args[0]}."
- )
- else:
- return (
- f"Error {exception.args[0]} connecting to unix socket: "
- f"{host_error}. {exception.args[1]}."
- )
-
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
@@ -1026,8 +991,8 @@ def __init__(
def __repr__(self) -> (str, str):
return (
- f"{type(self).__name__}"
- f"<{repr(self.connection_class(**self.connection_kwargs))}>"
+ f"<{type(self).__module__}.{type(self).__name__}"
+ f"({repr(self.connection_class(**self.connection_kwargs))})>"
)
def reset(self) -> None:
diff --git a/redis/retry.py b/redis/retry.py
index 606443053e..03fd973c4c 100644
--- a/redis/retry.py
+++ b/redis/retry.py
@@ -1,17 +1,27 @@
import socket
from time import sleep
+from typing import TYPE_CHECKING, Any, Callable, Iterable, Tuple, Type, TypeVar
from redis.exceptions import ConnectionError, TimeoutError
+T = TypeVar("T")
+
+if TYPE_CHECKING:
+ from redis.backoff import AbstractBackoff
+
class Retry:
"""Retry a specific number of times after a failure"""
def __init__(
self,
- backoff,
- retries,
- supported_errors=(ConnectionError, TimeoutError, socket.timeout),
+ backoff: "AbstractBackoff",
+ retries: int,
+ supported_errors: Tuple[Type[Exception], ...] = (
+ ConnectionError,
+ TimeoutError,
+ socket.timeout,
+ ),
):
"""
Initialize a `Retry` object with a `Backoff` object
@@ -24,7 +34,9 @@ def __init__(
self._retries = retries
self._supported_errors = supported_errors
- def update_supported_errors(self, specified_errors: list):
+ def update_supported_errors(
+ self, specified_errors: Iterable[Type[Exception]]
+ ) -> None:
"""
Updates the supported errors with the specified error types
"""
@@ -32,7 +44,11 @@ def update_supported_errors(self, specified_errors: list):
set(self._supported_errors + tuple(specified_errors))
)
- def call_with_retry(self, do, fail):
+ def call_with_retry(
+ self,
+ do: Callable[[], T],
+ fail: Callable[[Exception], Any],
+ ) -> T:
"""
Execute an operation that might fail and returns its result, or
raise the exception that was thrown depending on the `Backoff` object.
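With `call_with_retry` now generic over `T`, the return type flows through to the caller. A minimal usage sketch:

    from redis.backoff import ExponentialBackoff
    from redis.retry import Retry

    retry = Retry(ExponentialBackoff(), retries=3)

    def do() -> str:
        # the operation that may raise a supported error
        return "ok"

    def fail(error: Exception) -> None:
        # invoked after each failed attempt, before backing off
        print(f"attempt failed: {error!r}")

    result: str = retry.call_with_retry(do, fail)  # inferred as str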
diff --git a/redis/sentinel.py b/redis/sentinel.py
index 41f308d1ee..bc56fabdf3 100644
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -24,7 +24,10 @@ def __init__(self, **kwargs):
def __repr__(self):
pool = self.connection_pool
-        s = f"{type(self).__name__}<service={pool.service_name}%s>"
+ s = (
+ f"<{type(self).__module__}.{type(self).__name__}"
+ f"(service={pool.service_name}%s)>"
+ )
if self.host:
host_info = f",host={self.host},port={self.port}"
s = s % host_info
@@ -162,7 +165,10 @@ def __init__(self, service_name, sentinel_manager, **kwargs):
def __repr__(self):
role = "master" if self.is_master else "slave"
-        return f"{type(self).__name__}<service={self.service_name}({role})>"
+        return (
+            f"<{type(self).__module__}.{type(self).__name__}"
+            f"(service={self.service_name}({role}))>"
+        )
def reset(self):
super().reset()
@@ -261,7 +267,10 @@ def __repr__(self):
sentinel_addresses.append(
"{host}:{port}".format_map(sentinel.connection_pool.connection_kwargs)
)
-        return f'{type(self).__name__}<sentinels=[{",".join(sentinel_addresses)}]>'
+ return (
+ f"<{type(self).__module__}.{type(self).__name__}"
+ f'(sentinels=[{",".join(sentinel_addresses)}])>'
+ )
def check_master_state(self, state, service_name):
if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
diff --git a/redis/typing.py b/redis/typing.py
index 56a1e99ba7..749e5e82ed 100644
--- a/redis/typing.py
+++ b/redis/typing.py
@@ -33,6 +33,7 @@
PatternT = _StringLikeT # Patterns matched against keys, fields etc
FieldT = EncodableT # Fields within hash tables, streams and geo commands
KeysT = Union[KeyT, Iterable[KeyT]]
+ResponseT = Union[Awaitable[Any], Any]
ChannelT = _StringLikeT
GroupT = _StringLikeT # Consumer group
ConsumerT = _StringLikeT # Consumer name
diff --git a/redis/utils.py b/redis/utils.py
index 01fdfed7a2..2bf2a85a29 100644
--- a/redis/utils.py
+++ b/redis/utils.py
@@ -145,3 +145,15 @@ def get_lib_version():
except metadata.PackageNotFoundError:
libver = "99.99.99"
return libver
+
+
+def format_error_message(host_error: str, exception: BaseException) -> str:
+ if not exception.args:
+ return f"Error connecting to {host_error}."
+ elif len(exception.args) == 1:
+ return f"Error {exception.args[0]} connecting to {host_error}."
+ else:
+ return (
+ f"Error {exception.args[0]} connecting to {host_error}. "
+ f"{exception.args[1]}."
+ )
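The consolidated formatter covers the three historical `socket.error` argument shapes in one place; expected outputs, taken from the new tests further below:

    from redis.utils import format_error_message

    format_error_message("localhost:6379", OSError())
    # 'Error connecting to localhost:6379.'
    format_error_message("localhost:6379", OSError(12))
    # 'Error 12 connecting to localhost:6379.'
    format_error_message("localhost:6379", OSError(12, "Some Error"))
    # 'Error 12 connecting to localhost:6379. Some Error.'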
diff --git a/tests/conftest.py b/tests/conftest.py
index bad9f43e42..dd60a8ecd5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -17,7 +17,7 @@
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/0"
default_protocol = "2"
-default_redismod_url = "redis://localhost:6379"
+default_redismod_url = "redis://localhost:6479"
# default ssl client ignores verification for the purpose of testing
default_redis_ssl_url = "rediss://localhost:6666"
@@ -121,6 +121,8 @@ def _get_info(redis_url):
def pytest_sessionstart(session):
# during test discovery, e.g. with VS Code, we may not
# have a server running.
+ protocol = session.config.getoption("--protocol")
+ REDIS_INFO["resp_version"] = int(protocol) if protocol else None
redis_url = session.config.getoption("--redis-url")
try:
info = _get_info(redis_url)
@@ -143,8 +145,12 @@ def pytest_sessionstart(session):
session.config.REDIS_INFO = REDIS_INFO
# module info
+ stack_url = redis_url
+ if stack_url == default_redis_url:
+ stack_url = default_redismod_url
try:
- REDIS_INFO["modules"] = info["modules"]
+ stack_info = _get_info(stack_url)
+ REDIS_INFO["modules"] = stack_info["modules"]
except (KeyError, redis.exceptions.ConnectionError):
pass
@@ -261,6 +267,11 @@ def skip_if_cryptography() -> _TestDecorator:
return pytest.mark.skipif(False, reason="No cryptography dependency")
+def skip_if_resp_version(resp_version) -> _TestDecorator:
+ check = REDIS_INFO.get("resp_version", None) == resp_version
+ return pytest.mark.skipif(check, reason=f"RESP version required != {resp_version}")
+
+
def _get_client(
cls, request, single_connection_client=True, flushdb=True, from_url=None, **kwargs
):
@@ -275,7 +286,7 @@ def _get_client(
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
- if "protocol" not in redis_url:
+ if "protocol" not in redis_url and kwargs.get("protocol") is None:
kwargs["protocol"] = request.config.getoption("--protocol")
cluster_mode = REDIS_INFO["cluster_enabled"]
@@ -327,6 +338,21 @@ def r(request):
yield client
+@pytest.fixture()
+def stack_url(request):
+ stack_url = request.config.getoption("--redis-url", default=default_redismod_url)
+ if stack_url == default_redis_url:
+ return default_redismod_url
+ else:
+ return stack_url
+
+
+@pytest.fixture()
+def stack_r(request, stack_url):
+ with _get_client(redis.Redis, request, from_url=stack_url) as client:
+ yield client
+
+
@pytest.fixture()
def decoded_r(request):
with _get_client(redis.Redis, request, decode_responses=True) as client:
diff --git a/tests/test_asyncio/conftest.py b/tests/test_asyncio/conftest.py
index 5d9e0b4f2e..a6519b9edc 100644
--- a/tests/test_asyncio/conftest.py
+++ b/tests/test_asyncio/conftest.py
@@ -6,12 +6,10 @@
import pytest_asyncio
import redis.asyncio as redis
from packaging.version import Version
-from redis._parsers import _AsyncHiredisParser, _AsyncRESP2Parser
from redis.asyncio.client import Monitor
from redis.asyncio.connection import Connection, parse_url
from redis.asyncio.retry import Retry
from redis.backoff import NoBackoff
-from redis.utils import HIREDIS_AVAILABLE
from tests.conftest import REDIS_INFO
from .compat import mock
@@ -27,41 +25,21 @@ async def _get_info(redis_url):
@pytest_asyncio.fixture(
params=[
pytest.param(
- (True, _AsyncRESP2Parser),
+ (True,),
marks=pytest.mark.skipif(
'config.REDIS_INFO["cluster_enabled"]', reason="cluster mode enabled"
),
),
- (False, _AsyncRESP2Parser),
- pytest.param(
- (True, _AsyncHiredisParser),
- marks=[
- pytest.mark.skipif(
- 'config.REDIS_INFO["cluster_enabled"]',
- reason="cluster mode enabled",
- ),
- pytest.mark.skipif(
- not HIREDIS_AVAILABLE, reason="hiredis is not installed"
- ),
- ],
- ),
- pytest.param(
- (False, _AsyncHiredisParser),
- marks=pytest.mark.skipif(
- not HIREDIS_AVAILABLE, reason="hiredis is not installed"
- ),
- ),
+ (False,),
],
ids=[
- "single-python-parser",
- "pool-python-parser",
- "single-hiredis",
- "pool-hiredis",
+ "single",
+ "pool",
],
)
async def create_redis(request):
"""Wrapper around redis.create_redis."""
- single_connection, parser_cls = request.param
+ (single_connection,) = request.param
teardown_clients = []
@@ -69,19 +47,17 @@ async def client_factory(
url: str = request.config.getoption("--redis-url"),
cls=redis.Redis,
flushdb=True,
- protocol=request.config.getoption("--protocol"),
**kwargs,
):
- if "protocol" not in url:
+ if "protocol" not in url and kwargs.get("protocol") is None:
kwargs["protocol"] = request.config.getoption("--protocol")
cluster_mode = REDIS_INFO["cluster_enabled"]
if not cluster_mode:
single = kwargs.pop("single_connection_client", False) or single_connection
- parser_class = kwargs.pop("parser_class", None) or parser_cls
url_options = parse_url(url)
url_options.update(kwargs)
- pool = redis.ConnectionPool(parser_class=parser_class, **url_options)
+ pool = redis.ConnectionPool(**url_options)
client = cls(connection_pool=pool)
else:
client = redis.RedisCluster.from_url(url, **kwargs)
diff --git a/tests/test_asyncio/test_bloom.py b/tests/test_asyncio/test_bloom.py
index 278844416f..c63559a31c 100644
--- a/tests/test_asyncio/test_bloom.py
+++ b/tests/test_asyncio/test_bloom.py
@@ -1,6 +1,7 @@
from math import inf
import pytest
+import pytest_asyncio
import redis.asyncio as redis
from redis.exceptions import ModuleError, RedisError
from redis.utils import HIREDIS_AVAILABLE
@@ -11,10 +12,16 @@
)
+@pytest_asyncio.fixture()
+async def decoded_r(create_redis, stack_url):
+ return await create_redis(decode_responses=True, url=stack_url)
+
+
def intlist(obj):
return [int(v) for v in obj]
+@pytest.mark.redismod
async def test_create(decoded_r: redis.Redis):
"""Test CREATE/RESERVE calls"""
assert await decoded_r.bf().create("bloom", 0.01, 1000)
@@ -30,10 +37,12 @@ async def test_create(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_create(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 100)
+@pytest.mark.redismod
async def test_bf_add(decoded_r: redis.Redis):
assert await decoded_r.bf().create("bloom", 0.01, 1000)
assert 1 == await decoded_r.bf().add("bloom", "foo")
@@ -46,6 +55,7 @@ async def test_bf_add(decoded_r: redis.Redis):
assert [1, 0] == intlist(await decoded_r.bf().mexists("bloom", "foo", "noexist"))
+@pytest.mark.redismod
async def test_bf_insert(decoded_r: redis.Redis):
assert await decoded_r.bf().create("bloom", 0.01, 1000)
assert [1] == intlist(await decoded_r.bf().insert("bloom", ["foo"]))
@@ -76,6 +86,7 @@ async def test_bf_insert(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_bf_scandump_and_loadchunk(decoded_r: redis.Redis):
# Store a filter
await decoded_r.bf().create("myBloom", "0.0001", "1000")
@@ -127,6 +138,7 @@ async def do_verify():
await decoded_r.bf().create("myBloom", "0.0001", "10000000")
+@pytest.mark.redismod
async def test_bf_info(decoded_r: redis.Redis):
expansion = 4
# Store a filter
@@ -158,6 +170,7 @@ async def test_bf_info(decoded_r: redis.Redis):
assert True
+@pytest.mark.redismod
async def test_bf_card(decoded_r: redis.Redis):
# return 0 if the key does not exist
assert await decoded_r.bf().card("not_exist") == 0
@@ -172,6 +185,7 @@ async def test_bf_card(decoded_r: redis.Redis):
await decoded_r.bf().card("setKey")
+@pytest.mark.redismod
async def test_cf_add_and_insert(decoded_r: redis.Redis):
assert await decoded_r.cf().create("cuckoo", 1000)
assert await decoded_r.cf().add("cuckoo", "filter")
@@ -197,6 +211,7 @@ async def test_cf_add_and_insert(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_cf_exists_and_del(decoded_r: redis.Redis):
assert await decoded_r.cf().create("cuckoo", 1000)
assert await decoded_r.cf().add("cuckoo", "filter")
@@ -208,6 +223,7 @@ async def test_cf_exists_and_del(decoded_r: redis.Redis):
assert 0 == await decoded_r.cf().count("cuckoo", "filter")
+@pytest.mark.redismod
async def test_cms(decoded_r: redis.Redis):
assert await decoded_r.cms().initbydim("dim", 1000, 5)
assert await decoded_r.cms().initbyprob("prob", 0.01, 0.01)
@@ -224,6 +240,7 @@ async def test_cms(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_cms_merge(decoded_r: redis.Redis):
assert await decoded_r.cms().initbydim("A", 1000, 5)
assert await decoded_r.cms().initbydim("B", 1000, 5)
@@ -240,6 +257,7 @@ async def test_cms_merge(decoded_r: redis.Redis):
assert [16, 15, 21] == await decoded_r.cms().query("C", "foo", "bar", "baz")
+@pytest.mark.redismod
async def test_topk(decoded_r: redis.Redis):
# test list with empty buckets
assert await decoded_r.topk().reserve("topk", 3, 50, 4, 0.9)
@@ -320,6 +338,7 @@ async def test_topk(decoded_r: redis.Redis):
assert 0.9 == round(float(info["decay"]), 1)
+@pytest.mark.redismod
async def test_topk_incrby(decoded_r: redis.Redis):
await decoded_r.flushdb()
assert await decoded_r.topk().reserve("topk", 3, 10, 3, 1)
@@ -335,6 +354,7 @@ async def test_topk_incrby(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_reset(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 10)
# reset on empty histogram
@@ -351,6 +371,7 @@ async def test_tdigest_reset(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_tdigest_merge(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("to-tDigest", 10)
assert await decoded_r.tdigest().create("from-tDigest", 10)
@@ -378,6 +399,7 @@ async def test_tdigest_merge(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_min_and_max(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 100)
# insert data-points into sketch
@@ -388,6 +410,7 @@ async def test_tdigest_min_and_max(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.0", "bf")
async def test_tdigest_quantile(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 500)
@@ -416,6 +439,7 @@ async def test_tdigest_quantile(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_cdf(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 100)
# insert data-points into sketch
@@ -427,6 +451,7 @@ async def test_tdigest_cdf(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.0", "bf")
async def test_tdigest_trimmed_mean(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("tDigest", 100)
@@ -437,6 +462,7 @@ async def test_tdigest_trimmed_mean(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_rank(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("t-digest", 500)
assert await decoded_r.tdigest().add("t-digest", list(range(0, 20)))
@@ -447,6 +473,7 @@ async def test_tdigest_rank(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_revrank(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("t-digest", 500)
assert await decoded_r.tdigest().add("t-digest", list(range(0, 20)))
@@ -456,6 +483,7 @@ async def test_tdigest_revrank(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_byrank(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("t-digest", 500)
assert await decoded_r.tdigest().add("t-digest", list(range(1, 11)))
@@ -467,6 +495,7 @@ async def test_tdigest_byrank(decoded_r: redis.Redis):
@pytest.mark.experimental
+@pytest.mark.redismod
async def test_tdigest_byrevrank(decoded_r: redis.Redis):
assert await decoded_r.tdigest().create("t-digest", 500)
assert await decoded_r.tdigest().add("t-digest", list(range(1, 11)))
@@ -475,19 +504,3 @@ async def test_tdigest_byrevrank(decoded_r: redis.Redis):
assert (await decoded_r.tdigest().byrevrank("t-digest", 100))[0] == -inf
with pytest.raises(redis.ResponseError):
(await decoded_r.tdigest().byrevrank("t-digest", -1))[0]
-
-
-# # async def test_pipeline(decoded_r: redis.Redis):
-# pipeline = await decoded_r.bf().pipeline()
-# assert not await decoded_r.bf().execute_command("get pipeline")
-#
-# assert await decoded_r.bf().create("pipeline", 0.01, 1000)
-# for i in range(100):
-# pipeline.add("pipeline", i)
-# for i in range(100):
-# assert not (await decoded_r.bf().exists("pipeline", i))
-#
-# pipeline.execute()
-#
-# for i in range(100):
-# assert await decoded_r.bf().exists("pipeline", i)
diff --git a/tests/test_asyncio/test_cluster.py b/tests/test_asyncio/test_cluster.py
index d0b92fb4a6..ac12584be5 100644
--- a/tests/test_asyncio/test_cluster.py
+++ b/tests/test_asyncio/test_cluster.py
@@ -128,7 +128,9 @@ async def slowlog(r: RedisCluster) -> None:
await r.config_set("slowlog-max-len", old_max_length_value)
-async def get_mocked_redis_client(*args, **kwargs) -> RedisCluster:
+async def get_mocked_redis_client(
+ cluster_slots_raise_error=False, *args, **kwargs
+) -> RedisCluster:
"""
Return a stable RedisCluster object that have deterministic
nodes and slots setup to remove the problem of different IP addresses
@@ -140,9 +142,13 @@ async def get_mocked_redis_client(*args, **kwargs) -> RedisCluster:
with mock.patch.object(ClusterNode, "execute_command") as execute_command_mock:
async def execute_command(*_args, **_kwargs):
+
if _args[0] == "CLUSTER SLOTS":
- mock_cluster_slots = cluster_slots
- return mock_cluster_slots
+ if cluster_slots_raise_error:
+ raise ResponseError()
+ else:
+ mock_cluster_slots = cluster_slots
+ return mock_cluster_slots
elif _args[0] == "COMMAND":
return {"get": [], "set": []}
elif _args[0] == "INFO":
@@ -2457,7 +2463,10 @@ async def test_init_slots_cache_cluster_mode_disabled(self) -> None:
"""
with pytest.raises(RedisClusterException) as e:
rc = await get_mocked_redis_client(
- host=default_host, port=default_port, cluster_enabled=False
+ cluster_slots_raise_error=True,
+ host=default_host,
+ port=default_port,
+ cluster_enabled=False,
)
await rc.aclose()
assert "Cluster mode is not enabled on this node" in str(e.value)
@@ -2718,10 +2727,9 @@ async def parse_response(
async with r.pipeline() as pipe:
with pytest.raises(ClusterDownError):
await pipe.get(key).execute()
-
assert (
node.parse_response.await_count
- == 4 * r.cluster_error_retry_attempts - 3
+ == 3 * r.cluster_error_retry_attempts - 2
)
async def test_connection_error_not_raised(self, r: RedisCluster) -> None:
@@ -2885,8 +2893,9 @@ class TestSSL:
appropriate port.
"""
- SERVER_CERT = get_ssl_filename("server-cert.pem")
- SERVER_KEY = get_ssl_filename("server-key.pem")
+ CA_CERT = get_ssl_filename("ca-cert.pem")
+ CLIENT_CERT = get_ssl_filename("client-cert.pem")
+ CLIENT_KEY = get_ssl_filename("client-key.pem")
@pytest_asyncio.fixture()
def create_client(self, request: FixtureRequest) -> Callable[..., RedisCluster]:
@@ -3010,24 +3019,24 @@ async def test_validating_self_signed_certificate(
) -> None:
async with await create_client(
ssl=True,
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
ssl_cert_reqs="required",
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
) as rc:
assert await rc.ping()
async def test_validating_self_signed_string_certificate(
self, create_client: Callable[..., Awaitable[RedisCluster]]
) -> None:
- with open(self.SERVER_CERT) as f:
+ with open(self.CA_CERT) as f:
cert_data = f.read()
async with await create_client(
ssl=True,
ssl_ca_data=cert_data,
ssl_cert_reqs="required",
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
) as rc:
assert await rc.ping()
diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py
index 9ed7b84184..cc84c412d0 100644
--- a/tests/test_asyncio/test_commands.py
+++ b/tests/test_asyncio/test_commands.py
@@ -1348,6 +1348,19 @@ async def test_hscan(self, r: redis.Redis):
assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
_, dic = await r.hscan("a", match="a")
assert dic == {b"a": b"1"}
+ _, dic = await r.hscan("a_notset", match="a")
+ assert dic == {}
+
+ @skip_if_server_version_lt("7.3.240")
+ async def test_hscan_novalues(self, r: redis.Redis):
+ await r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ cursor, keys = await r.hscan("a", no_values=True)
+ assert cursor == 0
+ assert sorted(keys) == [b"a", b"b", b"c"]
+ _, keys = await r.hscan("a", match="a", no_values=True)
+ assert keys == [b"a"]
+ _, keys = await r.hscan("a_notset", match="a", no_values=True)
+ assert keys == []
@skip_if_server_version_lt("2.8.0")
async def test_hscan_iter(self, r: redis.Redis):
@@ -1356,6 +1369,20 @@ async def test_hscan_iter(self, r: redis.Redis):
assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
dic = {k: v async for k, v in r.hscan_iter("a", match="a")}
assert dic == {b"a": b"1"}
+ dic = {k: v async for k, v in r.hscan_iter("a_notset", match="a")}
+ assert dic == {}
+
+ @skip_if_server_version_lt("7.3.240")
+ async def test_hscan_iter_novalues(self, r: redis.Redis):
+ await r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ keys = list([k async for k in r.hscan_iter("a", no_values=True)])
+ assert sorted(keys) == [b"a", b"b", b"c"]
+ keys = list([k async for k in r.hscan_iter("a", match="a", no_values=True)])
+ assert keys == [b"a"]
+ keys = list(
+ [k async for k in r.hscan_iter("a", match="a_notset", no_values=True)]
+ )
+ assert keys == []
@skip_if_server_version_lt("2.8.0")
async def test_zscan(self, r: redis.Redis):
@@ -2891,6 +2918,31 @@ async def test_xinfo_stream(self, r: redis.Redis):
assert info["first-entry"] == await get_stream_message(r, stream, m1)
assert info["last-entry"] == await get_stream_message(r, stream, m2)
+ await r.xtrim(stream, 0)
+ info = await r.xinfo_stream(stream)
+ assert info["length"] == 0
+ assert info["first-entry"] is None
+ assert info["last-entry"] is None
+
+ @skip_if_server_version_lt("6.0.0")
+ async def test_xinfo_stream_full(self, r: redis.Redis):
+ stream = "stream"
+ group = "group"
+
+ await r.xadd(stream, {"foo": "bar"})
+ info = await r.xinfo_stream(stream, full=True)
+ assert info["length"] == 1
+ assert len(info["groups"]) == 0
+
+ await r.xgroup_create(stream, group, 0)
+ info = await r.xinfo_stream(stream, full=True)
+ assert info["length"] == 1
+
+ await r.xreadgroup(group, "consumer", streams={stream: ">"})
+ info = await r.xinfo_stream(stream, full=True)
+ consumer = info["groups"][0]["consumers"][0]
+ assert isinstance(consumer, dict)
+
@skip_if_server_version_lt("5.0.0")
async def test_xlen(self, r: redis.Redis):
stream = "stream"
diff --git a/tests/test_asyncio/test_connect.py b/tests/test_asyncio/test_connect.py
index 6c902c2d05..0df7ebb43a 100644
--- a/tests/test_asyncio/test_connect.py
+++ b/tests/test_asyncio/test_connect.py
@@ -61,13 +61,14 @@ async def test_uds_connect(uds_address):
)
async def test_tcp_ssl_tls12_custom_ciphers(tcp_address, ssl_ciphers):
host, port = tcp_address
- certfile = get_ssl_filename("server-cert.pem")
- keyfile = get_ssl_filename("server-key.pem")
+ certfile = get_ssl_filename("client-cert.pem")
+ keyfile = get_ssl_filename("client-key.pem")
+ ca_certfile = get_ssl_filename("ca-cert.pem")
conn = SSLConnection(
host=host,
port=port,
client_name=_CLIENT_NAME,
- ssl_ca_certs=certfile,
+ ssl_ca_certs=ca_certfile,
socket_timeout=10,
ssl_min_version=ssl.TLSVersion.TLSv1_2,
ssl_ciphers=ssl_ciphers,
@@ -89,13 +90,14 @@ async def test_tcp_ssl_tls12_custom_ciphers(tcp_address, ssl_ciphers):
)
async def test_tcp_ssl_connect(tcp_address, ssl_min_version):
host, port = tcp_address
- certfile = get_ssl_filename("server-cert.pem")
- keyfile = get_ssl_filename("server-key.pem")
+ certfile = get_ssl_filename("client-cert.pem")
+ keyfile = get_ssl_filename("client-key.pem")
+ ca_certfile = get_ssl_filename("ca-cert.pem")
conn = SSLConnection(
host=host,
port=port,
client_name=_CLIENT_NAME,
- ssl_ca_certs=certfile,
+ ssl_ca_certs=ca_certfile,
socket_timeout=10,
ssl_min_version=ssl_min_version,
)
diff --git a/tests/test_asyncio/test_connection.py b/tests/test_asyncio/test_connection.py
index 55a1c3a2f6..e584fc6999 100644
--- a/tests/test_asyncio/test_connection.py
+++ b/tests/test_asyncio/test_connection.py
@@ -12,7 +12,12 @@
_AsyncRESPBase,
)
from redis.asyncio import ConnectionPool, Redis
-from redis.asyncio.connection import Connection, UnixDomainSocketConnection, parse_url
+from redis.asyncio.connection import (
+ Connection,
+ SSLConnection,
+ UnixDomainSocketConnection,
+ parse_url,
+)
from redis.asyncio.retry import Retry
from redis.backoff import NoBackoff
from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError
@@ -68,7 +73,7 @@ async def call_with_retry(self, _, __):
in_use = False
return "foo"
- mock_conn = mock.MagicMock()
+ mock_conn = mock.AsyncMock(spec=Connection)
mock_conn.retry = Retry_()
async def get_conn(_):
@@ -490,3 +495,53 @@ async def test_connection_garbage_collection(request):
await client.aclose()
await pool.aclose()
+
+
+@pytest.mark.parametrize(
+ "conn, error, expected_message",
+ [
+ (SSLConnection(), OSError(), "Error connecting to localhost:6379."),
+ (SSLConnection(), OSError(12), "Error 12 connecting to localhost:6379."),
+ (
+ SSLConnection(),
+ OSError(12, "Some Error"),
+ "Error 12 connecting to localhost:6379. Some Error.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(),
+ "Error connecting to unix:///tmp/redis.sock.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(12),
+ "Error 12 connecting to unix:///tmp/redis.sock.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(12, "Some Error"),
+ "Error 12 connecting to unix:///tmp/redis.sock. Some Error.",
+ ),
+ ],
+)
+async def test_format_error_message(conn, error, expected_message):
+ """Test that the _error_message function formats errors correctly"""
+ error_message = conn._error_message(error)
+ assert error_message == expected_message
+
+
+async def test_network_connection_failure():
+ with pytest.raises(ConnectionError) as e:
+ redis = Redis(host="127.0.0.1", port=9999)
+ await redis.set("a", "b")
+ assert str(e.value).startswith("Error 111 connecting to 127.0.0.1:9999. Connect")
+
+
+async def test_unix_socket_connection_failure():
+ with pytest.raises(ConnectionError) as e:
+ redis = Redis(unix_socket_path="unix:///tmp/a.sock")
+ await redis.set("a", "b")
+ assert (
+ str(e.value)
+ == "Error 2 connecting to unix:///tmp/a.sock. No such file or directory."
+ )
diff --git a/tests/test_asyncio/test_connection_pool.py b/tests/test_asyncio/test_connection_pool.py
index ed90fc73fc..2f5bbfb621 100644
--- a/tests/test_asyncio/test_connection_pool.py
+++ b/tests/test_asyncio/test_connection_pool.py
@@ -180,11 +180,8 @@ async def test_repr_contains_db_info_tcp(self):
async with self.get_pool(
connection_kwargs=connection_kwargs, connection_class=redis.Connection
) as pool:
-            expected = (
-                "ConnectionPool<Connection<"
-                "host=localhost,port=6379,db=1,client_name=test-client>>"
-            )
-            assert repr(pool) == expected
+ expected = "host=localhost,port=6379,db=1,client_name=test-client"
+ assert expected in repr(pool)
async def test_repr_contains_db_info_unix(self):
connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
@@ -192,11 +189,8 @@ async def test_repr_contains_db_info_unix(self):
connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection,
) as pool:
-            expected = (
-                "ConnectionPool<UnixDomainSocketConnection<"
-                "path=/abc,db=1,client_name=test-client>>"
-            )
-            assert repr(pool) == expected
+ expected = "path=/abc,db=1,client_name=test-client"
+ assert expected in repr(pool)
class TestBlockingConnectionPool:
@@ -293,11 +287,8 @@ def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
host="localhost", port=6379, client_name="test-client"
)
-        expected = (
-            "ConnectionPool<Connection<"
-            "host=localhost,port=6379,db=0,client_name=test-client>>"
-        )
-        assert repr(pool) == expected
+ expected = "host=localhost,port=6379,db=0,client_name=test-client"
+ assert expected in repr(pool)
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
@@ -305,11 +296,8 @@ def test_repr_contains_db_info_unix(self):
path="abc",
client_name="test-client",
)
-        expected = (
-            "ConnectionPool<UnixDomainSocketConnection<"
-            "path=abc,db=0,client_name=test-client>>"
-        )
-        assert repr(pool) == expected
+ expected = "path=abc,db=0,client_name=test-client"
+ assert expected in repr(pool)
class TestConnectionPoolURLParsing:
@@ -659,7 +647,9 @@ def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url("redis://localhost")
pool = connection.connection_pool
- assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ assert re.match(
+ r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
+ ).groups() == (
"ConnectionPool",
"Connection",
"host=localhost,port=6379,db=0",
@@ -669,7 +659,9 @@ def test_connect_from_url_unix(self):
connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
- assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ assert re.match(
+ r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
+ ).groups() == (
"ConnectionPool",
"UnixDomainSocketConnection",
"path=/path/to/socket,db=0",
diff --git a/tests/test_asyncio/test_graph.py b/tests/test_asyncio/test_graph.py
index d1649b617b..2014ea38b6 100644
--- a/tests/test_asyncio/test_graph.py
+++ b/tests/test_asyncio/test_graph.py
@@ -1,11 +1,19 @@
import pytest
+import pytest_asyncio
import redis.asyncio as redis
from redis.commands.graph import Edge, Node, Path
from redis.commands.graph.execution_plan import Operation
from redis.exceptions import ResponseError
-from tests.conftest import skip_if_redis_enterprise
+from tests.conftest import skip_if_redis_enterprise, skip_if_resp_version
+@pytest_asyncio.fixture()
+async def decoded_r(create_redis, stack_url):
+ return await create_redis(decode_responses=True, url="redis://localhost:6480")
+
+
+@pytest.mark.redismod
+@skip_if_resp_version(3)
async def test_bulk(decoded_r):
with pytest.raises(NotImplementedError):
await decoded_r.graph().bulk()
@@ -13,7 +21,7 @@ async def test_bulk(decoded_r):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_graph_creation(decoded_r: redis.Redis):
graph = decoded_r.graph()
@@ -59,7 +67,7 @@ async def test_graph_creation(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_array_functions(decoded_r: redis.Redis):
graph = decoded_r.graph()
@@ -83,7 +91,7 @@ async def test_array_functions(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_path(decoded_r: redis.Redis):
node0 = Node(node_id=0, label="L1")
node1 = Node(node_id=1, label="L1")
@@ -104,7 +112,7 @@ async def test_path(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_param(decoded_r: redis.Redis):
params = [1, 2.3, "str", True, False, None, [0, 1, 2]]
query = "RETURN $param"
@@ -115,7 +123,7 @@ async def test_param(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_map(decoded_r: redis.Redis):
query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
@@ -133,7 +141,7 @@ async def test_map(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_point(decoded_r: redis.Redis):
query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})"
expected_lat = 32.070794860
@@ -151,7 +159,7 @@ async def test_point(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_index_response(decoded_r: redis.Redis):
result_set = await decoded_r.graph().query("CREATE INDEX ON :person(age)")
assert 1 == result_set.indices_created
@@ -167,7 +175,7 @@ async def test_index_response(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_stringify_query_result(decoded_r: redis.Redis):
graph = decoded_r.graph()
@@ -222,7 +230,7 @@ async def test_stringify_query_result(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_optional_match(decoded_r: redis.Redis):
# Build a graph of form (a)-[R]->(b)
node0 = Node(node_id=0, label="L1", properties={"value": "a"})
@@ -248,7 +256,7 @@ async def test_optional_match(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_cached_execution(decoded_r: redis.Redis):
await decoded_r.graph().query("CREATE ()")
@@ -269,7 +277,7 @@ async def test_cached_execution(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_slowlog(decoded_r: redis.Redis):
create_query = """CREATE
(:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
@@ -284,7 +292,7 @@ async def test_slowlog(decoded_r: redis.Redis):
@pytest.mark.xfail(strict=False)
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_query_timeout(decoded_r: redis.Redis):
# Build a sample graph with 1000 nodes.
await decoded_r.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
@@ -299,7 +307,7 @@ async def test_query_timeout(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_read_only_query(decoded_r: redis.Redis):
with pytest.raises(Exception):
# Issue a write query, specifying read-only true,
@@ -309,7 +317,7 @@ async def test_read_only_query(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_profile(decoded_r: redis.Redis):
q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
profile = (await decoded_r.graph().profile(q)).result_set
@@ -326,7 +334,7 @@ async def test_profile(decoded_r: redis.Redis):
@skip_if_redis_enterprise()
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_config(decoded_r: redis.Redis):
config_name = "RESULTSET_SIZE"
config_value = 3
@@ -359,7 +367,7 @@ async def test_config(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_list_keys(decoded_r: redis.Redis):
result = await decoded_r.graph().list_keys()
assert result == []
@@ -383,7 +391,7 @@ async def test_list_keys(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_multi_label(decoded_r: redis.Redis):
redis_graph = decoded_r.graph("g")
@@ -410,7 +418,7 @@ async def test_multi_label(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_execution_plan(decoded_r: redis.Redis):
redis_graph = decoded_r.graph("execution_plan")
create_query = """CREATE
@@ -430,7 +438,7 @@ async def test_execution_plan(decoded_r: redis.Redis):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
async def test_explain(decoded_r: redis.Redis):
redis_graph = decoded_r.graph("execution_plan")
# graph creation / population
diff --git a/tests/test_asyncio/test_hash.py b/tests/test_asyncio/test_hash.py
new file mode 100644
index 0000000000..e31ea7eaf3
--- /dev/null
+++ b/tests/test_asyncio/test_hash.py
@@ -0,0 +1,300 @@
+import asyncio
+from datetime import datetime, timedelta
+
+from tests.conftest import skip_if_server_version_lt
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpire_basic(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert await r.hexpire("test:hash", 1, "field1") == [1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpire_with_timedelta(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert await r.hexpire("test:hash", timedelta(seconds=1), "field1") == [1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpire_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1"})
+ assert await r.hexpire("test:hash", 2, "field1", xx=True) == [0]
+ assert await r.hexpire("test:hash", 2, "field1", nx=True) == [1]
+ assert await r.hexpire("test:hash", 1, "field1", xx=True) == [1]
+ assert await r.hexpire("test:hash", 2, "field1", nx=True) == [0]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ await r.hset("test:hash", "field1", "value1")
+ await r.hexpire("test:hash", 2, "field1")
+ assert await r.hexpire("test:hash", 1, "field1", gt=True) == [0]
+ assert await r.hexpire("test:hash", 1, "field1", lt=True) == [1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpire_nonexistent_key_or_field(r):
+ await r.delete("test:hash")
+ assert await r.hexpire("test:hash", 1, "field1") == [-2]
+ await r.hset("test:hash", "field1", "value1")
+ assert await r.hexpire("test:hash", 1, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpire_multiple_fields(r):
+ await r.delete("test:hash")
+ await r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ assert await r.hexpire("test:hash", 1, "field1", "field2") == [1, 1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is False
+ assert await r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpire_basic(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert await r.hpexpire("test:hash", 500, "field1") == [1]
+ await asyncio.sleep(0.6)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpire_with_timedelta(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert await r.hpexpire("test:hash", timedelta(milliseconds=500), "field1") == [1]
+ await asyncio.sleep(0.6)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpire_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1"})
+ assert await r.hpexpire("test:hash", 1500, "field1", xx=True) == [0]
+ assert await r.hpexpire("test:hash", 1500, "field1", nx=True) == [1]
+ assert await r.hpexpire("test:hash", 500, "field1", xx=True) == [1]
+ assert await r.hpexpire("test:hash", 1500, "field1", nx=True) == [0]
+ await asyncio.sleep(0.6)
+ assert await r.hexists("test:hash", "field1") is False
+ await r.hset("test:hash", "field1", "value1")
+ await r.hpexpire("test:hash", 1000, "field1")
+ assert await r.hpexpire("test:hash", 500, "field1", gt=True) == [0]
+ assert await r.hpexpire("test:hash", 500, "field1", lt=True) == [1]
+ await asyncio.sleep(0.6)
+ assert await r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpire_nonexistent_key_or_field(r):
+ await r.delete("test:hash")
+ assert await r.hpexpire("test:hash", 500, "field1") == [-2]
+ await r.hset("test:hash", "field1", "value1")
+ assert await r.hpexpire("test:hash", 500, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpire_multiple_fields(r):
+ await r.delete("test:hash")
+ await r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ assert await r.hpexpire("test:hash", 500, "field1", "field2") == [1, 1]
+ await asyncio.sleep(0.6)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is False
+ assert await r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpireat_basic(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert await r.hexpireat("test:hash", exp_time, "field1") == [1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpireat_with_datetime(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = datetime.now() + timedelta(seconds=1)
+ assert await r.hexpireat("test:hash", exp_time, "field1") == [1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpireat_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1"})
+ future_exp_time = int((datetime.now() + timedelta(seconds=2)).timestamp())
+ past_exp_time = int((datetime.now() - timedelta(seconds=1)).timestamp())
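+ # a timestamp already in the past deletes the field immediately; HEXPIREAT reports 2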
+ assert await r.hexpireat("test:hash", future_exp_time, "field1", xx=True) == [0]
+ assert await r.hexpireat("test:hash", future_exp_time, "field1", nx=True) == [1]
+ assert await r.hexpireat("test:hash", past_exp_time, "field1", gt=True) == [0]
+ assert await r.hexpireat("test:hash", past_exp_time, "field1", lt=True) == [2]
+ assert await r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpireat_nonexistent_key_or_field(r):
+ await r.delete("test:hash")
+ future_exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert await r.hexpireat("test:hash", future_exp_time, "field1") == [-2]
+ await r.hset("test:hash", "field1", "value1")
+ assert await r.hexpireat("test:hash", future_exp_time, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpireat_multiple_fields(r):
+ await r.delete("test:hash")
+ await r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert await r.hexpireat("test:hash", exp_time, "field1", "field2") == [1, 1]
+ await asyncio.sleep(1.1)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is False
+ assert await r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpireat_basic(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = int((datetime.now() + timedelta(milliseconds=400)).timestamp() * 1000)
+ assert await r.hpexpireat("test:hash", exp_time, "field1") == [1]
+ await asyncio.sleep(0.5)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpireat_with_datetime(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = datetime.now() + timedelta(milliseconds=400)
+ assert await r.hpexpireat("test:hash", exp_time, "field1") == [1]
+ await asyncio.sleep(0.5)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpireat_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1"})
+ future_exp_time = int(
+ (datetime.now() + timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ past_exp_time = int(
+ (datetime.now() - timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ assert await r.hpexpireat("test:hash", future_exp_time, "field1", xx=True) == [0]
+ assert await r.hpexpireat("test:hash", future_exp_time, "field1", nx=True) == [1]
+ assert await r.hpexpireat("test:hash", past_exp_time, "field1", gt=True) == [0]
+ assert await r.hpexpireat("test:hash", past_exp_time, "field1", lt=True) == [2]
+ assert await r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpireat_nonexistent_key_or_field(r):
+ await r.delete("test:hash")
+ future_exp_time = int(
+ (datetime.now() + timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ assert await r.hpexpireat("test:hash", future_exp_time, "field1") == [-2]
+ await r.hset("test:hash", "field1", "value1")
+ assert await r.hpexpireat("test:hash", future_exp_time, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpireat_multiple_fields(r):
+ await r.delete("test:hash")
+ await r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ exp_time = int((datetime.now() + timedelta(milliseconds=400)).timestamp() * 1000)
+ assert await r.hpexpireat("test:hash", exp_time, "field1", "field2") == [1, 1]
+ await asyncio.sleep(0.5)
+ assert await r.hexists("test:hash", "field1") is False
+ assert await r.hexists("test:hash", "field2") is False
+ assert await r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpersist_multiple_fields_mixed_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ await r.hexpire("test:hash", 5000, "field1")
+ assert await r.hpersist("test:hash", "field1", "field2", "field3") == [1, -1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hexpiretime_multiple_fields_mixed_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ await r.hexpireat("test:hash", future_time, "field1")
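+ # HEXPIRETIME returns the absolute Unix expiry in seconds; -1 = no TTL, -2 = no field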
+ result = await r.hexpiretime("test:hash", "field1", "field2", "field3")
+ assert future_time - 10 < result[0] <= future_time
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpexpiretime_multiple_fields_mixed_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ await r.hexpireat("test:hash", future_time, "field1")
+ result = await r.hpexpiretime("test:hash", "field1", "field2", "field3")
+ assert future_time * 1000 - 10000 < result[0] <= future_time * 1000
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_httl_multiple_fields_mixed_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ await r.hexpireat("test:hash", future_time, "field1")
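+ # HTTL reports the remaining TTL in seconds; -1 = no TTL, -2 = no such field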
+ result = await r.httl("test:hash", "field1", "field2", "field3")
+ assert 30 * 60 - 10 < result[0] <= 30 * 60
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+async def test_hpttl_multiple_fields_mixed_conditions(r):
+ await r.delete("test:hash")
+ await r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ await r.hexpireat("test:hash", future_time, "field1")
+ result = await r.hpttl("test:hash", "field1", "field2", "field3")
+ assert 30 * 60000 - 10000 < result[0] <= 30 * 60000
+ assert result[1:] == [-1, -2]
diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py
index 81821d931a..852fd4aaa6 100644
--- a/tests/test_asyncio/test_json.py
+++ b/tests/test_asyncio/test_json.py
@@ -1,10 +1,17 @@
import pytest
+import pytest_asyncio
import redis.asyncio as redis
from redis import exceptions
from redis.commands.json.path import Path
from tests.conftest import assert_resp_response, skip_ifmodversion_lt
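+# These module tests want str (not bytes) replies, so build a client with
+# decode_responses=True against the Redis Stack server behind stack_url.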
+@pytest_asyncio.fixture()
+async def decoded_r(create_redis, stack_url):
+ return await create_redis(decode_responses=True, url=stack_url)
+
+
+@pytest.mark.redismod
async def test_json_setbinarykey(decoded_r: redis.Redis):
d = {"hello": "world", b"some": "value"}
with pytest.raises(TypeError):
@@ -12,38 +19,40 @@ async def test_json_setbinarykey(decoded_r: redis.Redis):
assert await decoded_r.json().set("somekey", Path.root_path(), d, decode_keys=True)
+@pytest.mark.redismod
async def test_json_setgetdeleteforget(decoded_r: redis.Redis):
assert await decoded_r.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
+ assert await decoded_r.json().get("foo") == "bar"
assert await decoded_r.json().get("baz") is None
assert await decoded_r.json().delete("foo") == 1
assert await decoded_r.json().forget("foo") == 0 # second delete
assert await decoded_r.exists("foo") == 0
+@pytest.mark.redismod
async def test_jsonget(decoded_r: redis.Redis):
await decoded_r.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
+ assert await decoded_r.json().get("foo") == "bar"
+@pytest.mark.redismod
async def test_json_get_jset(decoded_r: redis.Redis):
assert await decoded_r.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]])
+ assert await decoded_r.json().get("foo") == "bar"
assert await decoded_r.json().get("baz") is None
assert 1 == await decoded_r.json().delete("foo")
assert await decoded_r.exists("foo") == 0
+@pytest.mark.redismod
async def test_nonascii_setgetdelete(decoded_r: redis.Redis):
assert await decoded_r.json().set("notascii", Path.root_path(), "hyvää-élève")
- res = "hyvää-élève"
- assert_resp_response(
- decoded_r, await decoded_r.json().get("notascii", no_escape=True), res, [[res]]
- )
+ assert await decoded_r.json().get("notascii", no_escape=True) == "hyvää-élève"
assert 1 == await decoded_r.json().delete("notascii")
assert await decoded_r.exists("notascii") == 0
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.6.0", "ReJSON")
async def test_json_merge(decoded_r: redis.Redis):
# Test with root path $
@@ -78,6 +87,7 @@ async def test_json_merge(decoded_r: redis.Redis):
}
+@pytest.mark.redismod
async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: redis.Redis):
obj = {"foo": "bar"}
assert await decoded_r.json().set("obj", Path.root_path(), obj)
@@ -90,12 +100,12 @@ async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: redis.Redis):
assert await decoded_r.json().set("obj", Path("foo"), "baz", xx=True)
assert await decoded_r.json().set("obj", Path("qaz"), "baz", nx=True)
- # Test that flags are mutually exlusive
+ # Test that flags are mutually exclusive
with pytest.raises(Exception):
await decoded_r.json().set("obj", Path("foo"), "baz", nx=True, xx=True)
-@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_mgetshouldsucceed(decoded_r: redis.Redis):
await decoded_r.json().set("1", Path.root_path(), 1)
await decoded_r.json().set("2", Path.root_path(), 2)
@@ -105,6 +115,7 @@ async def test_mgetshouldsucceed(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.6.0", "ReJSON")
async def test_mset(decoded_r: redis.Redis):
await decoded_r.json().mset(
@@ -115,13 +126,15 @@ async def test_mset(decoded_r: redis.Redis):
assert await decoded_r.json().mget(["1", "2"], Path.root_path()) == [1, 2]
+@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
async def test_clear(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 1 == await decoded_r.json().clear("arr", Path.root_path())
- assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [], [[[]]])
+ assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [], [])
+@pytest.mark.redismod
async def test_type(decoded_r: redis.Redis):
await decoded_r.json().set("1", Path.root_path(), 1)
assert_resp_response(
@@ -135,6 +148,7 @@ async def test_type(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_numincrby(decoded_r):
await decoded_r.json().set("num", Path.root_path(), 1)
assert_resp_response(
@@ -146,6 +160,7 @@ async def test_numincrby(decoded_r):
assert_resp_response(decoded_r, res, 1.25, [1.25])
+@pytest.mark.redismod
async def test_nummultby(decoded_r: redis.Redis):
await decoded_r.json().set("num", Path.root_path(), 1)
@@ -158,6 +173,7 @@ async def test_nummultby(decoded_r: redis.Redis):
assert_resp_response(decoded_r, res, 2.5, [2.5])
+@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
async def test_toggle(decoded_r: redis.Redis):
await decoded_r.json().set("bool", Path.root_path(), False)
@@ -169,13 +185,14 @@ async def test_toggle(decoded_r: redis.Redis):
await decoded_r.json().toggle("num", Path.root_path())
+@pytest.mark.redismod
async def test_strappend(decoded_r: redis.Redis):
await decoded_r.json().set("jsonkey", Path.root_path(), "foo")
assert 6 == await decoded_r.json().strappend("jsonkey", "bar")
- res = await decoded_r.json().get("jsonkey", Path.root_path())
- assert_resp_response(decoded_r, res, "foobar", [["foobar"]])
+ assert "foobar" == await decoded_r.json().get("jsonkey", Path.root_path())
+@pytest.mark.redismod
async def test_strlen(decoded_r: redis.Redis):
await decoded_r.json().set("str", Path.root_path(), "foo")
assert 3 == await decoded_r.json().strlen("str", Path.root_path())
@@ -184,6 +201,7 @@ async def test_strlen(decoded_r: redis.Redis):
assert 6 == await decoded_r.json().strlen("str")
+@pytest.mark.redismod
async def test_arrappend(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [1])
assert 2 == await decoded_r.json().arrappend("arr", Path.root_path(), 2)
@@ -191,6 +209,7 @@ async def test_arrappend(decoded_r: redis.Redis):
assert 7 == await decoded_r.json().arrappend("arr", Path.root_path(), *[5, 6, 7])
+@pytest.mark.redismod
async def test_arrindex(decoded_r: redis.Redis):
r_path = Path.root_path()
await decoded_r.json().set("arr", r_path, [0, 1, 2, 3, 4])
@@ -203,19 +222,19 @@ async def test_arrindex(decoded_r: redis.Redis):
assert -1 == await decoded_r.json().arrindex("arr", r_path, 4, start=1, stop=3)
+@pytest.mark.redismod
async def test_arrinsert(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [0, 4])
assert 5 == await decoded_r.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
- res = [0, 1, 2, 3, 4]
- assert_resp_response(decoded_r, await decoded_r.json().get("arr"), res, [[res]])
+ assert await decoded_r.json().get("arr") == [0, 1, 2, 3, 4]
# test prepends
await decoded_r.json().set("val2", Path.root_path(), [5, 6, 7, 8, 9])
await decoded_r.json().arrinsert("val2", Path.root_path(), 0, ["some", "thing"])
- res = [["some", "thing"], 5, 6, 7, 8, 9]
- assert_resp_response(decoded_r, await decoded_r.json().get("val2"), res, [[res]])
+ assert await decoded_r.json().get("val2") == [["some", "thing"], 5, 6, 7, 8, 9]
+@pytest.mark.redismod
async def test_arrlen(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 5 == await decoded_r.json().arrlen("arr", Path.root_path())
@@ -223,13 +242,14 @@ async def test_arrlen(decoded_r: redis.Redis):
assert await decoded_r.json().arrlen("fakekey") is None
+@pytest.mark.redismod
async def test_arrpop(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 4 == await decoded_r.json().arrpop("arr", Path.root_path(), 4)
assert 3 == await decoded_r.json().arrpop("arr", Path.root_path(), -1)
assert 2 == await decoded_r.json().arrpop("arr", Path.root_path())
assert 0 == await decoded_r.json().arrpop("arr", Path.root_path(), 0)
- assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [1], [[[1]]])
+ assert [1] == await decoded_r.json().get("arr")
# test out of bounds
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
@@ -240,11 +260,11 @@ async def test_arrpop(decoded_r: redis.Redis):
assert await decoded_r.json().arrpop("arr") is None
+@pytest.mark.redismod
async def test_arrtrim(decoded_r: redis.Redis):
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 3 == await decoded_r.json().arrtrim("arr", Path.root_path(), 1, 3)
- res = await decoded_r.json().get("arr")
- assert_resp_response(decoded_r, res, [1, 2, 3], [[[1, 2, 3]]])
+ assert [1, 2, 3] == await decoded_r.json().get("arr")
# <0 test, should be 0 equivalent
await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
@@ -263,6 +283,7 @@ async def test_arrtrim(decoded_r: redis.Redis):
assert 0 == await decoded_r.json().arrtrim("arr", Path.root_path(), 9, 11)
+@pytest.mark.redismod
async def test_resp(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": 1, "qaz": True}
await decoded_r.json().set("obj", Path.root_path(), obj)
@@ -272,6 +293,7 @@ async def test_resp(decoded_r: redis.Redis):
assert isinstance(await decoded_r.json().resp("obj"), list)
+@pytest.mark.redismod
async def test_objkeys(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": "qaz"}
await decoded_r.json().set("obj", Path.root_path(), obj)
@@ -288,6 +310,7 @@ async def test_objkeys(decoded_r: redis.Redis):
assert await decoded_r.json().objkeys("fakekey") is None
+@pytest.mark.redismod
async def test_objlen(decoded_r: redis.Redis):
obj = {"foo": "bar", "baz": "qaz"}
await decoded_r.json().set("obj", Path.root_path(), obj)
@@ -321,19 +344,19 @@ async def test_objlen(decoded_r: redis.Redis):
# assert await decoded_r.get("foo") is None
+@pytest.mark.redismod
async def test_json_delete_with_dollar(decoded_r: redis.Redis):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
assert await decoded_r.json().set("doc1", "$", doc1)
assert await decoded_r.json().delete("doc1", "$..a") == 2
- res = [{"nested": {"b": 3}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == [{"nested": {"b": 3}}]
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert await decoded_r.json().set("doc2", "$", doc2)
assert await decoded_r.json().delete("doc2", "$..a") == 1
- res = await decoded_r.json().get("doc2", "$")
- res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc2", "$"), res, [res])
+ assert await decoded_r.json().get("doc2", "$") == [
+ {"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}
+ ]
doc3 = [
{
@@ -364,8 +387,7 @@ async def test_json_delete_with_dollar(decoded_r: redis.Redis):
}
]
]
- res = await decoded_r.json().get("doc3", "$")
- assert_resp_response(decoded_r, res, doc3val, [doc3val])
+ assert await decoded_r.json().get("doc3", "$") == doc3val
# Test async default path
assert await decoded_r.json().delete("doc3") == 1
@@ -374,18 +396,19 @@ async def test_json_delete_with_dollar(decoded_r: redis.Redis):
await decoded_r.json().delete("not_a_document", "..a")
+@pytest.mark.redismod
async def test_json_forget_with_dollar(decoded_r: redis.Redis):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
assert await decoded_r.json().set("doc1", "$", doc1)
assert await decoded_r.json().forget("doc1", "$..a") == 2
- res = [{"nested": {"b": 3}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == [{"nested": {"b": 3}}]
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert await decoded_r.json().set("doc2", "$", doc2)
assert await decoded_r.json().forget("doc2", "$..a") == 1
- res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc2", "$"), res, [res])
+ assert await decoded_r.json().get("doc2", "$") == [
+ {"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}
+ ]
doc3 = [
{
@@ -416,8 +439,7 @@ async def test_json_forget_with_dollar(decoded_r: redis.Redis):
}
]
]
- res = await decoded_r.json().get("doc3", "$")
- assert_resp_response(decoded_r, res, doc3val, [doc3val])
+ assert await decoded_r.json().get("doc3", "$") == doc3val
# Test async default path
assert await decoded_r.json().forget("doc3") == 1
@@ -427,6 +449,7 @@ async def test_json_forget_with_dollar(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_json_mget_dollar(decoded_r: redis.Redis):
# Test mget with multi paths
await decoded_r.json().set(
@@ -440,17 +463,11 @@ async def test_json_mget_dollar(decoded_r: redis.Redis):
{"a": 4, "b": 5, "nested": {"a": 6}, "c": None, "nested2": {"a": [None]}},
)
# Compare also to single JSON.GET
- res = [1, 3, None]
- assert_resp_response(
- decoded_r, await decoded_r.json().get("doc1", "$..a"), res, [res]
- )
- res = [4, 6, [None]]
- assert_resp_response(
- decoded_r, await decoded_r.json().get("doc2", "$..a"), res, [res]
- )
+ assert await decoded_r.json().get("doc1", "$..a") == [1, 3, None]
+ assert await decoded_r.json().get("doc2", "$..a") == [4, 6, [None]]
# Test mget with single path
- await decoded_r.json().mget("doc1", "$..a") == [1, 3, None]
+ assert await decoded_r.json().mget(["doc1"], "$..a") == [[1, 3, None]]
# Test mget with multi path
res = await decoded_r.json().mget(["doc1", "doc2"], "$..a")
assert res == [[1, 3, None], [4, 6, [None]]]
@@ -462,6 +479,7 @@ async def test_json_mget_dollar(decoded_r: redis.Redis):
assert res == [None, None]
+@pytest.mark.redismod
async def test_numby_commands_dollar(decoded_r: redis.Redis):
# Test NUMINCRBY
await decoded_r.json().set(
@@ -505,7 +523,9 @@ async def test_numby_commands_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}
)
- await decoded_r.json().numincrby("doc1", ".b[0].a", 3) == 5
+ assert_resp_response(
+ decoded_r, await decoded_r.json().numincrby("doc1", ".b[0].a", 3), 5, [5]
+ )
# Test legacy NUMMULTBY
await decoded_r.json().set(
@@ -513,39 +533,43 @@ async def test_numby_commands_dollar(decoded_r: redis.Redis):
)
with pytest.deprecated_call():
- await decoded_r.json().nummultby("doc1", ".b[0].a", 3) == 6
+ assert_resp_response(
+ decoded_r, await decoded_r.json().nummultby("doc1", ".b[0].a", 3), 6, [6]
+ )
+@pytest.mark.redismod
async def test_strappend_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
# Test multi
- await decoded_r.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
+ assert await decoded_r.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
res = [{"a": "foobar", "nested1": {"a": "hellobar"}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
- await decoded_r.json().strappend("doc1", "baz", "$.nested1.a") == [11]
+ assert await decoded_r.json().strappend("doc1", "baz", "$.nested1.a") == [11]
res = [{"a": "foobar", "nested1": {"a": "hellobarbaz"}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().strappend("non_existing_doc", "$..a", "err")
# Test multi
- await decoded_r.json().strappend("doc1", "bar", ".*.a") == 8
+ assert await decoded_r.json().strappend("doc1", "bar", ".*.a") == 14
res = [{"a": "foobar", "nested1": {"a": "hellobarbazbar"}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing path
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().strappend("doc1", "piu")
+@pytest.mark.redismod
async def test_strlen_dollar(decoded_r: redis.Redis):
# Test multi
await decoded_r.json().set(
@@ -558,14 +582,15 @@ async def test_strlen_dollar(decoded_r: redis.Redis):
assert res1 == res2
# Test single
- await decoded_r.json().strlen("doc1", "$.nested1.a") == [8]
- await decoded_r.json().strlen("doc1", "$.nested2.a") == [None]
+ assert await decoded_r.json().strlen("doc1", "$.nested1.a") == [8]
+ assert await decoded_r.json().strlen("doc1", "$.nested2.a") == [None]
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().strlen("non_existing_doc", "$..a")
+@pytest.mark.redismod
async def test_arrappend_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -577,7 +602,8 @@ async def test_arrappend_dollar(decoded_r: redis.Redis):
},
)
# Test multi
- await decoded_r.json().arrappend("doc1", "$..a", "bar", "racuda") == [3, 5, None]
+ res = [3, 5, None]
+ assert await decoded_r.json().arrappend("doc1", "$..a", "bar", "racuda") == res
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -585,7 +611,7 @@ async def test_arrappend_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
assert await decoded_r.json().arrappend("doc1", "$.nested1.a", "baz") == [6]
@@ -596,7 +622,7 @@ async def test_arrappend_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -622,7 +648,7 @@ async def test_arrappend_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
assert await decoded_r.json().arrappend("doc1", ".nested1.a", "baz") == 6
res = [
@@ -632,13 +658,14 @@ async def test_arrappend_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().arrappend("non_existing_doc", "$..a")
+@pytest.mark.redismod
async def test_arrinsert_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -660,7 +687,7 @@ async def test_arrinsert_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
assert await decoded_r.json().arrinsert("doc1", "$.nested1.a", -2, "baz") == [6]
res = [
@@ -670,13 +697,14 @@ async def test_arrinsert_dollar(decoded_r: redis.Redis):
"nested2": {"a": 31},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().arrappend("non_existing_doc", "$..a")
+@pytest.mark.redismod
async def test_arrlen_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -722,6 +750,7 @@ async def test_arrlen_dollar(decoded_r: redis.Redis):
assert await decoded_r.json().arrlen("non_existing_doc", "..a") is None
+@pytest.mark.redismod
async def test_arrpop_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -737,7 +766,7 @@ async def test_arrpop_dollar(decoded_r: redis.Redis):
assert await decoded_r.json().arrpop("doc1", "$..a", 1) == ['"foo"', None, None]
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -754,15 +783,16 @@ async def test_arrpop_dollar(decoded_r: redis.Redis):
},
)
# Test multi (all paths are updated, but return result of last path)
- await decoded_r.json().arrpop("doc1", "..a", "1") is None
+ assert await decoded_r.json().arrpop("doc1", "..a", "1") == "null"
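+ # a legacy (dot) path returns the popped value JSON-serialized, hence "null"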
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().arrpop("non_existing_doc", "..a")
+@pytest.mark.redismod
async def test_arrtrim_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -776,15 +806,15 @@ async def test_arrtrim_dollar(decoded_r: redis.Redis):
# Test multi
assert await decoded_r.json().arrtrim("doc1", "$..a", "1", -1) == [0, 2, None]
res = [{"a": [], "nested1": {"a": [None, "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
assert await decoded_r.json().arrtrim("doc1", "$..a", "1", "1") == [0, 1, None]
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
assert await decoded_r.json().arrtrim("doc1", "$.nested1.a", 1, 0) == [0]
res = [{"a": [], "nested1": {"a": []}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -807,13 +837,14 @@ async def test_arrtrim_dollar(decoded_r: redis.Redis):
# Test single
assert await decoded_r.json().arrtrim("doc1", ".nested1.a", "1", "1") == 1
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().arrtrim("non_existing_doc", "..a", 1, 1)
+@pytest.mark.redismod
async def test_objkeys_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -843,6 +874,7 @@ async def test_objkeys_dollar(decoded_r: redis.Redis):
assert await decoded_r.json().objkeys("doc1", "$..nowhere") == []
+@pytest.mark.redismod
async def test_objlen_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -897,6 +929,7 @@ def load_types_data(nested_key_name):
return jdata, types
+@pytest.mark.redismod
async def test_type_dollar(decoded_r: redis.Redis):
jdata, jtypes = load_types_data("a")
await decoded_r.json().set("doc1", "$", jdata)
@@ -915,6 +948,7 @@ async def test_type_dollar(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_clear_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -933,7 +967,7 @@ async def test_clear_dollar(decoded_r: redis.Redis):
res = [
{"nested1": {"a": {}}, "a": [], "nested2": {"a": "claro"}, "nested3": {"a": {}}}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test single
await decoded_r.json().set(
@@ -955,19 +989,18 @@ async def test_clear_dollar(decoded_r: redis.Redis):
"nested3": {"a": {"baz": 50}},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing path (async defaults to root)
assert await decoded_r.json().clear("doc1") == 1
- assert_resp_response(
- decoded_r, await decoded_r.json().get("doc1", "$"), [{}], [[{}]]
- )
+ assert await decoded_r.json().get("doc1", "$") == [{}]
# Test missing key
with pytest.raises(exceptions.ResponseError):
await decoded_r.json().clear("non_existing_doc", "$..a")
+@pytest.mark.redismod
async def test_toggle_dollar(decoded_r: redis.Redis):
await decoded_r.json().set(
"doc1",
@@ -989,7 +1022,7 @@ async def test_toggle_dollar(decoded_r: redis.Redis):
"nested3": {"a": False},
}
]
- assert_resp_response(decoded_r, await decoded_r.json().get("doc1", "$"), res, [res])
+ assert await decoded_r.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py
index 3d271bf1d0..4b29360d72 100644
--- a/tests/test_asyncio/test_pipeline.py
+++ b/tests/test_asyncio/test_pipeline.py
@@ -7,7 +7,6 @@
class TestPipeline:
- @pytest.mark.onlynoncluster
async def test_pipeline_is_true(self, r):
"""Ensure pipeline instances are not false-y"""
async with r.pipeline() as pipe:
diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py
index 1f1931e28a..1e1785c9eb 100644
--- a/tests/test_asyncio/test_search.py
+++ b/tests/test_asyncio/test_search.py
@@ -4,14 +4,22 @@
import time
from io import TextIOWrapper
+import numpy as np
import pytest
+import pytest_asyncio
import redis.asyncio as redis
import redis.commands.search
import redis.commands.search.aggregation as aggregations
import redis.commands.search.reducers as reducers
from redis.commands.search import AsyncSearch
-from redis.commands.search.field import GeoField, NumericField, TagField, TextField
-from redis.commands.search.indexDefinition import IndexDefinition
+from redis.commands.search.field import (
+ GeoField,
+ NumericField,
+ TagField,
+ TextField,
+ VectorField,
+)
+from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import GeoFilter, NumericFilter, Query
from redis.commands.search.result import Result
from redis.commands.search.suggestion import Suggestion
@@ -19,6 +27,7 @@
assert_resp_response,
is_resp2_connection,
skip_if_redis_enterprise,
+ skip_if_resp_version,
skip_ifmodversion_lt,
)
@@ -31,6 +40,11 @@
)
+@pytest_asyncio.fixture()
+async def decoded_r(create_redis, stack_url):
+ return await create_redis(decode_responses=True, url=stack_url)
+
+
async def waitForIndex(env, idx, timeout=None):
delay = 0.1
while True:
@@ -1554,3 +1568,53 @@ async def test_query_timeout(decoded_r: redis.Redis):
q2 = Query("foo").timeout("not_a_number")
with pytest.raises(redis.ResponseError):
await decoded_r.ft().search(q2)
+
+
+@pytest.mark.redismod
+@skip_if_resp_version(3)
+async def test_binary_and_text_fields(decoded_r: redis.Redis):
+ fake_vec = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
+
+ index_name = "mixed_index"
+ mixed_data = {"first_name": "🐍python", "vector_emb": fake_vec.tobytes()}
+ await decoded_r.hset(f"{index_name}:1", mapping=mixed_data)
+
+ schema = (
+ TagField("first_name"),
+ VectorField(
+ "embeddings_bio",
+ algorithm="HNSW",
+ attributes={
+ "TYPE": "FLOAT32",
+ "DIM": 4,
+ "DISTANCE_METRIC": "COSINE",
+ },
+ ),
+ )
+
+ await decoded_r.ft(index_name).create_index(
+ fields=schema,
+ definition=IndexDefinition(
+ prefix=[f"{index_name}:"], index_type=IndexType.HASH
+ ),
+ )
+
+ query = (
+ Query("*")
+ .return_field("vector_emb", decode_field=False)
+ .return_field("first_name")
+ )
+ result = await decoded_r.ft(index_name).search(query=query, query_params={})
+ docs = result.docs
+
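+ # decode_field=False keeps vector_emb as raw bytes, so the float32 buffer survives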
+ decoded_vec_from_search_results = np.frombuffer(
+ docs[0]["vector_emb"], dtype=np.float32
+ )
+
+ assert np.array_equal(
+ decoded_vec_from_search_results, fake_vec
+ ), "The vectors are not equal"
+
+ assert (
+ docs[0]["first_name"] == mixed_data["first_name"]
+ ), "The text field is not decoded correctly"
diff --git a/tests/test_asyncio/test_timeseries.py b/tests/test_asyncio/test_timeseries.py
index 1302ee4fa2..0475c318ec 100644
--- a/tests/test_asyncio/test_timeseries.py
+++ b/tests/test_asyncio/test_timeseries.py
@@ -2,6 +2,7 @@
from time import sleep
import pytest
+import pytest_asyncio
import redis.asyncio as redis
from tests.conftest import (
assert_resp_response,
@@ -10,6 +11,12 @@
)
+@pytest_asyncio.fixture()
+async def decoded_r(create_redis, stack_url):
+ return await create_redis(decode_responses=True, url=stack_url)
+
+
+@pytest.mark.redismod
async def test_create(decoded_r: redis.Redis):
assert await decoded_r.ts().create(1)
assert await decoded_r.ts().create(2, retention_msecs=5)
@@ -27,6 +34,7 @@ async def test_create(decoded_r: redis.Redis):
assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
async def test_create_duplicate_policy(decoded_r: redis.Redis):
# Test for duplicate policy
@@ -42,6 +50,7 @@ async def test_create_duplicate_policy(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_alter(decoded_r: redis.Redis):
assert await decoded_r.ts().create(1)
res = await decoded_r.ts().info(1)
@@ -64,8 +73,9 @@ async def test_alter(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def test_alter_diplicate_policy(decoded_r: redis.Redis):
+async def test_alter_duplicate_policy(decoded_r: redis.Redis):
assert await decoded_r.ts().create(1)
info = await decoded_r.ts().info(1)
assert_resp_response(
@@ -78,6 +88,7 @@ async def test_alter_diplicate_policy(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_add(decoded_r: redis.Redis):
assert 1 == await decoded_r.ts().add(1, 1, 1)
assert 2 == await decoded_r.ts().add(2, 2, 3, retention_msecs=10)
@@ -100,46 +111,50 @@ async def test_add(decoded_r: redis.Redis):
assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def test_add_duplicate_policy(r: redis.Redis):
+async def test_add_duplicate_policy(decoded_r: redis.Redis):
# Test for duplicate policy BLOCK
- assert 1 == await r.ts().add("time-serie-add-ooo-block", 1, 5.0)
+ assert 1 == await decoded_r.ts().add("time-serie-add-ooo-block", 1, 5.0)
with pytest.raises(Exception):
- await r.ts().add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block")
+ await decoded_r.ts().add(
+ "time-serie-add-ooo-block", 1, 5.0, on_duplicate="block"
+ )
# Test for duplicate policy LAST
- assert 1 == await r.ts().add("time-serie-add-ooo-last", 1, 5.0)
- assert 1 == await r.ts().add(
- "time-serie-add-ooo-last", 1, 10.0, duplicate_policy="last"
+ assert 1 == await decoded_r.ts().add("time-serie-add-ooo-last", 1, 5.0)
+ assert 1 == await decoded_r.ts().add(
+ "time-serie-add-ooo-last", 1, 10.0, on_duplicate="last"
)
- res = await r.ts().get("time-serie-add-ooo-last")
+ res = await decoded_r.ts().get("time-serie-add-ooo-last")
assert 10.0 == res[1]
# Test for duplicate policy FIRST
- assert 1 == await r.ts().add("time-serie-add-ooo-first", 1, 5.0)
- assert 1 == await r.ts().add(
- "time-serie-add-ooo-first", 1, 10.0, duplicate_policy="first"
+ assert 1 == await decoded_r.ts().add("time-serie-add-ooo-first", 1, 5.0)
+ assert 1 == await decoded_r.ts().add(
+ "time-serie-add-ooo-first", 1, 10.0, on_duplicate="first"
)
- res = await r.ts().get("time-serie-add-ooo-first")
+ res = await decoded_r.ts().get("time-serie-add-ooo-first")
assert 5.0 == res[1]
# Test for duplicate policy MAX
- assert 1 == await r.ts().add("time-serie-add-ooo-max", 1, 5.0)
- assert 1 == await r.ts().add(
- "time-serie-add-ooo-max", 1, 10.0, duplicate_policy="max"
+ assert 1 == await decoded_r.ts().add("time-serie-add-ooo-max", 1, 5.0)
+ assert 1 == await decoded_r.ts().add(
+ "time-serie-add-ooo-max", 1, 10.0, on_duplicate="max"
)
- res = await r.ts().get("time-serie-add-ooo-max")
+ res = await decoded_r.ts().get("time-serie-add-ooo-max")
assert 10.0 == res[1]
# Test for duplicate policy MIN
- assert 1 == await r.ts().add("time-serie-add-ooo-min", 1, 5.0)
- assert 1 == await r.ts().add(
- "time-serie-add-ooo-min", 1, 10.0, duplicate_policy="min"
+ assert 1 == await decoded_r.ts().add("time-serie-add-ooo-min", 1, 5.0)
+ assert 1 == await decoded_r.ts().add(
+ "time-serie-add-ooo-min", 1, 10.0, on_duplicate="min"
)
- res = await r.ts().get("time-serie-add-ooo-min")
+ res = await decoded_r.ts().get("time-serie-add-ooo-min")
assert 5.0 == res[1]
+@pytest.mark.redismod
async def test_madd(decoded_r: redis.Redis):
await decoded_r.ts().create("a")
assert [1, 2, 3] == await decoded_r.ts().madd(
@@ -147,6 +162,7 @@ async def test_madd(decoded_r: redis.Redis):
)
+@pytest.mark.redismod
async def test_incrby_decrby(decoded_r: redis.Redis):
for _ in range(100):
assert await decoded_r.ts().incrby(1, 1)
@@ -175,7 +191,7 @@ async def test_incrby_decrby(decoded_r: redis.Redis):
assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize"))
-@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_create_and_delete_rule(decoded_r: redis.Redis):
# test rule creation
time = 100
@@ -199,7 +215,8 @@ async def test_create_and_delete_rule(decoded_r: redis.Redis):
assert not info["rules"]
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
async def test_del_range(decoded_r: redis.Redis):
try:
await decoded_r.ts().delete("test", 0, 100)
@@ -215,21 +232,25 @@ async def test_del_range(decoded_r: redis.Redis):
)
-async def test_range(r: redis.Redis):
+@pytest.mark.redismod
+async def test_range(decoded_r: redis.Redis):
for i in range(100):
- await r.ts().add(1, i, i % 7)
- assert 100 == len(await r.ts().range(1, 0, 200))
+ await decoded_r.ts().add(1, i, i % 7)
+ assert 100 == len(await decoded_r.ts().range(1, 0, 200))
for i in range(100):
- await r.ts().add(1, i + 200, i % 7)
- assert 200 == len(await r.ts().range(1, 0, 500))
+ await decoded_r.ts().add(1, i + 200, i % 7)
+ assert 200 == len(await decoded_r.ts().range(1, 0, 500))
# last sample isn't returned
assert 20 == len(
- await r.ts().range(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
+ await decoded_r.ts().range(
+ 1, 0, 500, aggregation_type="avg", bucket_size_msec=10
+ )
)
- assert 10 == len(await r.ts().range(1, 0, 500, count=10))
+ assert 10 == len(await decoded_r.ts().range(1, 0, 500, count=10))
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
async def test_range_advanced(decoded_r: redis.Redis):
for i in range(100):
await decoded_r.ts().add(1, i, i % 7)
@@ -259,7 +280,8 @@ async def test_range_advanced(decoded_r: redis.Redis):
assert_resp_response(decoded_r, res, [(0, 2.55), (10, 3.0)], [[0, 2.55], [10, 3.0]])
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
async def test_rev_range(decoded_r: redis.Redis):
for i in range(100):
await decoded_r.ts().add(1, i, i % 7)
@@ -303,6 +325,7 @@ async def test_rev_range(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_multi_range(decoded_r: redis.Redis):
await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
await decoded_r.ts().create(
@@ -357,7 +380,8 @@ async def test_multi_range(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
async def test_multi_range_advanced(decoded_r: redis.Redis):
await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
await decoded_r.ts().create(
@@ -474,7 +498,8 @@ async def test_multi_range_advanced(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
async def test_multi_reverse_range(decoded_r: redis.Redis):
await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"})
await decoded_r.ts().create(
@@ -636,6 +661,7 @@ async def test_multi_reverse_range(decoded_r: redis.Redis):
assert [[1, 10.0], [0, 1.0]] == res["1"][2]
+@pytest.mark.redismod
async def test_get(decoded_r: redis.Redis):
name = "test"
await decoded_r.ts().create(name)
@@ -647,6 +673,7 @@ async def test_get(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_mget(decoded_r: redis.Redis):
await decoded_r.ts().create(1, labels={"Test": "This"})
await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"})
@@ -681,6 +708,7 @@ async def test_mget(decoded_r: redis.Redis):
assert {"Taste": "That", "Test": "This"} == res["2"][0]
+@pytest.mark.redismod
async def test_info(decoded_r: redis.Redis):
await decoded_r.ts().create(
1, retention_msecs=5, labels={"currentLabel": "currentData"}
@@ -692,8 +720,9 @@ async def test_info(decoded_r: redis.Redis):
assert info["labels"]["currentLabel"] == "currentData"
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-async def testInfoDuplicatePolicy(decoded_r: redis.Redis):
+async def test_info_duplicate_policy(decoded_r: redis.Redis):
await decoded_r.ts().create(
1, retention_msecs=5, labels={"currentLabel": "currentData"}
)
@@ -710,6 +739,7 @@ async def testInfoDuplicatePolicy(decoded_r: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
async def test_query_index(decoded_r: redis.Redis):
await decoded_r.ts().create(1, labels={"Test": "This"})
await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"})
@@ -720,25 +750,124 @@ async def test_query_index(decoded_r: redis.Redis):
)
-# # async def test_pipeline(r: redis.Redis):
-# pipeline = await r.ts().pipeline()
-# pipeline.create("with_pipeline")
-# for i in range(100):
-# pipeline.add("with_pipeline", i, 1.1 * i)
-# pipeline.execute()
-
-# info = await r.ts().info("with_pipeline")
-# assert info.lastTimeStamp == 99
-# assert info.total_samples == 100
-# assert await r.ts().get("with_pipeline")[1] == 99 * 1.1
-
-
+@pytest.mark.redismod
async def test_uncompressed(decoded_r: redis.Redis):
await decoded_r.ts().create("compressed")
await decoded_r.ts().create("uncompressed", uncompressed=True)
+ for i in range(1000):
+ await decoded_r.ts().add("compressed", i, i)
+ await decoded_r.ts().add("uncompressed", i, i)
compressed_info = await decoded_r.ts().info("compressed")
uncompressed_info = await decoded_r.ts().info("uncompressed")
if is_resp2_connection(decoded_r):
assert compressed_info.memory_usage != uncompressed_info.memory_usage
else:
assert compressed_info["memoryUsage"] != uncompressed_info["memoryUsage"]
+
+
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+async def test_create_with_insertion_filters(decoded_r: redis.Redis):
+ await decoded_r.ts().create(
+ "time-series-1",
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
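+ # With these IGNORE filters, a sample within 5 ms and 10.0 of the last stored
+ # sample is dropped, and TS.ADD returns the last stored timestamp instead.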
+ assert 1000 == await decoded_r.ts().add("time-series-1", 1000, 1.0)
+ assert 1010 == await decoded_r.ts().add("time-series-1", 1010, 11.0)
+ assert 1010 == await decoded_r.ts().add("time-series-1", 1013, 10.0)
+ assert 1020 == await decoded_r.ts().add("time-series-1", 1020, 11.5)
+ assert 1021 == await decoded_r.ts().add("time-series-1", 1021, 22.0)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ decoded_r,
+ data_points,
+ [(1000, 1.0), (1010, 11.0), (1020, 11.5), (1021, 22.0)],
+ [[1000, 1.0], [1010, 11.0], [1020, 11.5], [1021, 22.0]],
+ )
+
+
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+async def test_alter_with_insertion_filters(decoded_r: redis.Redis):
+ assert 1000 == await decoded_r.ts().add("time-series-1", 1000, 1.0)
+ assert 1010 == await decoded_r.ts().add("time-series-1", 1010, 11.0)
+ assert 1013 == await decoded_r.ts().add("time-series-1", 1013, 10.0)
+
+ await decoded_r.ts().alter(
+ "time-series-1",
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
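+ # (1015, 11.5) is within 5 ms and 10.0 of (1013, 10.0), so it is now ignored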
+ assert 1013 == await decoded_r.ts().add("time-series-1", 1015, 11.5)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ decoded_r,
+ data_points,
+ [(1000, 1.0), (1010, 11.0), (1013, 10.0)],
+ [[1000, 1.0], [1010, 11.0], [1013, 10.0]],
+ )
+
+
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+async def test_add_with_insertion_filters(decoded_r: redis.Redis):
+ assert 1000 == await decoded_r.ts().add(
+ "time-series-1",
+ 1000,
+ 1.0,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
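+ # (1004, 3.0) falls inside both thresholds of (1000, 1.0) and is ignored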
+ assert 1000 == await decoded_r.ts().add("time-series-1", 1004, 3.0)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(decoded_r, data_points, [(1000, 1.0)], [[1000, 1.0]])
+
+
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+async def test_incrby_with_insertion_filters(decoded_r: redis.Redis):
+ assert 1000 == await decoded_r.ts().incrby(
+ "time-series-1",
+ 1.0,
+ timestamp=1000,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
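+ # an increment of 3.0 stays inside the filters, so the series is unchanged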
+ assert 1000 == await decoded_r.ts().incrby("time-series-1", 3.0, timestamp=1000)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(decoded_r, data_points, [(1000, 1.0)], [[1000, 1.0]])
+
+ assert 1000 == await decoded_r.ts().incrby("time-series-1", 10.1, timestamp=1000)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(decoded_r, data_points, [(1000, 11.1)], [[1000, 11.1]])
+
+
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+async def test_decrby_with_insertion_filters(decoded_r: redis.Redis):
+ assert 1000 == await decoded_r.ts().decrby(
+ "time-series-1",
+ 1.0,
+ timestamp=1000,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
+ assert 1000 == await decoded_r.ts().decrby("time-series-1", 3.0, timestamp=1000)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(decoded_r, data_points, [(1000, -1.0)], [[1000, -1.0]])
+
+ assert 1000 == await decoded_r.ts().decrby("time-series-1", 10.1, timestamp=1000)
+
+ data_points = await decoded_r.ts().range("time-series-1", "-", "+")
+ assert_resp_response(decoded_r, data_points, [(1000, -11.1)], [[1000, -11.1]])
diff --git a/tests/test_bloom.py b/tests/test_bloom.py
index 464a946f54..d1a0484225 100644
--- a/tests/test_bloom.py
+++ b/tests/test_bloom.py
@@ -5,7 +5,20 @@
from redis.exceptions import ModuleError, RedisError
from redis.utils import HIREDIS_AVAILABLE
-from .conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt
+from .conftest import (
+ _get_client,
+ assert_resp_response,
+ is_resp2_connection,
+ skip_ifmodversion_lt,
+)
+
+
+@pytest.fixture()
+def decoded_r(request, stack_url):
+ with _get_client(
+ redis.Redis, request, decode_responses=True, from_url=stack_url
+ ) as client:
+ yield client
def intlist(obj):
@@ -24,6 +37,7 @@ def client(decoded_r):
return decoded_r
+@pytest.mark.redismod
def test_create(client):
"""Test CREATE/RESERVE calls"""
assert client.bf().create("bloom", 0.01, 1000)
@@ -38,6 +52,7 @@ def test_create(client):
assert client.topk().reserve("topk", 5, 100, 5, 0.9)
+@pytest.mark.redismod
def test_bf_reserve(client):
"""Testing BF.RESERVE"""
assert client.bf().reserve("bloom", 0.01, 1000)
@@ -53,10 +68,12 @@ def test_bf_reserve(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_create(client):
assert client.tdigest().create("tDigest", 100)
+@pytest.mark.redismod
def test_bf_add(client):
assert client.bf().create("bloom", 0.01, 1000)
assert 1 == client.bf().add("bloom", "foo")
@@ -69,6 +86,7 @@ def test_bf_add(client):
assert [1, 0] == intlist(client.bf().mexists("bloom", "foo", "noexist"))
+@pytest.mark.redismod
def test_bf_insert(client):
assert client.bf().create("bloom", 0.01, 1000)
assert [1] == intlist(client.bf().insert("bloom", ["foo"]))
@@ -99,6 +117,7 @@ def test_bf_insert(client):
)
+@pytest.mark.redismod
def test_bf_scandump_and_loadchunk(client):
# Store a filter
client.bf().create("myBloom", "0.0001", "1000")
@@ -150,6 +169,7 @@ def do_verify():
client.bf().create("myBloom", "0.0001", "10000000")
+@pytest.mark.redismod
def test_bf_info(client):
expansion = 4
# Store a filter
@@ -181,6 +201,7 @@ def test_bf_info(client):
assert True
+@pytest.mark.redismod
def test_bf_card(client):
# return 0 if the key does not exist
assert client.bf().card("not_exist") == 0
@@ -195,6 +216,7 @@ def test_bf_card(client):
client.bf().card("setKey")
+@pytest.mark.redismod
def test_cf_add_and_insert(client):
assert client.cf().create("cuckoo", 1000)
assert client.cf().add("cuckoo", "filter")
@@ -220,6 +242,7 @@ def test_cf_add_and_insert(client):
)
+@pytest.mark.redismod
def test_cf_exists_and_del(client):
assert client.cf().create("cuckoo", 1000)
assert client.cf().add("cuckoo", "filter")
@@ -232,6 +255,7 @@ def test_cf_exists_and_del(client):
assert 0 == client.cf().count("cuckoo", "filter")
+@pytest.mark.redismod
def test_cms(client):
assert client.cms().initbydim("dim", 1000, 5)
assert client.cms().initbyprob("prob", 0.01, 0.01)
@@ -248,6 +272,7 @@ def test_cms(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_cms_merge(client):
assert client.cms().initbydim("A", 1000, 5)
assert client.cms().initbydim("B", 1000, 5)
@@ -264,6 +289,7 @@ def test_cms_merge(client):
assert [16, 15, 21] == client.cms().query("C", "foo", "bar", "baz")
+@pytest.mark.redismod
def test_topk(client):
# test list with empty buckets
assert client.topk().reserve("topk", 3, 50, 4, 0.9)
@@ -343,6 +369,7 @@ def test_topk(client):
assert 0.9 == round(float(info["decay"]), 1)
+@pytest.mark.redismod
def test_topk_incrby(client):
client.flushdb()
assert client.topk().reserve("topk", 3, 10, 3, 1)
@@ -357,6 +384,7 @@ def test_topk_incrby(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_reset(client):
assert client.tdigest().create("tDigest", 10)
# reset on empty histogram
@@ -373,6 +401,7 @@ def test_tdigest_reset(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_tdigest_merge(client):
assert client.tdigest().create("to-tDigest", 10)
assert client.tdigest().create("from-tDigest", 10)
@@ -400,6 +429,7 @@ def test_tdigest_merge(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_min_and_max(client):
assert client.tdigest().create("tDigest", 100)
# insert data-points into sketch
@@ -410,6 +440,7 @@ def test_tdigest_min_and_max(client):
@pytest.mark.experimental
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.0", "bf")
def test_tdigest_quantile(client):
assert client.tdigest().create("tDigest", 500)
@@ -431,6 +462,7 @@ def test_tdigest_quantile(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_cdf(client):
assert client.tdigest().create("tDigest", 100)
# insert data-points into sketch
@@ -442,6 +474,7 @@ def test_tdigest_cdf(client):
@pytest.mark.experimental
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.4.0", "bf")
def test_tdigest_trimmed_mean(client):
assert client.tdigest().create("tDigest", 100)
@@ -452,6 +485,7 @@ def test_tdigest_trimmed_mean(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_rank(client):
assert client.tdigest().create("t-digest", 500)
assert client.tdigest().add("t-digest", list(range(0, 20)))
@@ -462,6 +496,7 @@ def test_tdigest_rank(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_revrank(client):
assert client.tdigest().create("t-digest", 500)
assert client.tdigest().add("t-digest", list(range(0, 20)))
@@ -471,6 +506,7 @@ def test_tdigest_revrank(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_byrank(client):
assert client.tdigest().create("t-digest", 500)
assert client.tdigest().add("t-digest", list(range(1, 11)))
@@ -482,6 +518,7 @@ def test_tdigest_byrank(client):
@pytest.mark.experimental
+@pytest.mark.redismod
def test_tdigest_byrevrank(client):
assert client.tdigest().create("t-digest", 500)
assert client.tdigest().add("t-digest", list(range(1, 11)))
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index 5383390090..229e0fc6e6 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -151,7 +151,9 @@ def cleanup():
r.config_set("slowlog-max-len", 128)
-def get_mocked_redis_client(func=None, *args, **kwargs):
+def get_mocked_redis_client(
+ func=None, cluster_slots_raise_error=False, *args, **kwargs
+):
"""
Return a stable RedisCluster object that has deterministic
nodes and slots setup to remove the problem of different IP addresses
@@ -164,8 +166,11 @@ def get_mocked_redis_client(func=None, *args, **kwargs):
def execute_command(*_args, **_kwargs):
if _args[0] == "CLUSTER SLOTS":
- mock_cluster_slots = cluster_slots
- return mock_cluster_slots
+ if cluster_slots_raise_error:
+ raise ResponseError()
+ else:
+ mock_cluster_slots = cluster_slots
+ return mock_cluster_slots
elif _args[0] == "COMMAND":
return {"get": [], "set": []}
elif _args[0] == "INFO":
@@ -2442,6 +2447,7 @@ def try_delete_libs(self, r, *lib_names):
except Exception:
pass
+ @pytest.mark.redismod
@skip_if_server_version_lt("7.1.140")
def test_tfunction_load_delete(self, r):
r.gears_refresh_cluster()
@@ -2450,6 +2456,7 @@ def test_tfunction_load_delete(self, r):
assert r.tfunction_load(lib_code)
assert r.tfunction_delete("lib1")
+ @pytest.mark.redismod
@skip_if_server_version_lt("7.1.140")
def test_tfunction_list(self, r):
r.gears_refresh_cluster()
@@ -2473,6 +2480,7 @@ def test_tfunction_list(self, r):
assert r.tfunction_delete("lib2")
assert r.tfunction_delete("lib3")
+ @pytest.mark.redismod
@skip_if_server_version_lt("7.1.140")
def test_tfcall(self, r):
r.gears_refresh_cluster()
@@ -2651,7 +2659,10 @@ def test_init_slots_cache_cluster_mode_disabled(self):
"""
with pytest.raises(RedisClusterException) as e:
get_mocked_redis_client(
- host=default_host, port=default_port, cluster_enabled=False
+ cluster_slots_raise_error=True,
+ host=default_host,
+ port=default_port,
+ cluster_enabled=False,
)
assert "Cluster mode is not enabled on this node" in str(e.value)
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
index e3b44a147f..5c23a7096f 100644
--- a/tests/test_command_parser.py
+++ b/tests/test_command_parser.py
@@ -1,5 +1,6 @@
import pytest
from redis._parsers import CommandsParser
+from redis.utils import HIREDIS_AVAILABLE
from .conftest import (
assert_resp_response,
@@ -8,6 +9,9 @@
)
+# The response to COMMAND contains maps inside sets, which are not handled
+# by the hiredis-py parser (see https://github.com/redis/hiredis-py/issues/188)
+@pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only")
class TestCommandsParser:
def test_init_commands(self, r):
commands_parser = CommandsParser(r)
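The skip comment points at a genuine representational limit: a RESP3 COMMAND reply nests maps inside sets, and since Python dicts are unhashable there is no native set-of-maps. A self-contained illustration of the constraint:

# A RESP3 map decodes to a dict; putting one inside a set fails outright.
command_info = {"name": "get", "arity": 2}
try:
    {command_info}          # set literal containing a dict
except TypeError as err:
    print(err)              # -> unhashable type: 'dict'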
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 0e93d340f4..42376b50d8 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -18,6 +18,7 @@
parse_info,
)
from redis.client import EMPTY_RESPONSE, NEVER_DECODE
+from redis.utils import HIREDIS_AVAILABLE
from .conftest import (
_get_client,
@@ -707,6 +708,20 @@ def test_client_kill_filter_by_user(self, r, request):
assert c["user"] != killuser
r.acl_deluser(killuser)
+ @skip_if_server_version_lt("7.3.240")
+ @skip_if_redis_enterprise()
+ @pytest.mark.onlynoncluster
+ def test_client_kill_filter_by_maxage(self, r, request):
+ r2 = _get_client(redis.Redis, request, flushdb=False)
+ name = "target-foobar"
+ r2.client_setname(name)
+ time.sleep(4)
+ initial_clients = [c["name"] for c in r.client_list()]
+ assert name in initial_clients
+ r.client_kill_filter(maxage=2)
+ final_clients = [c["name"] for c in r.client_list()]
+ assert name not in final_clients
+
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.9.50")
@skip_if_redis_enterprise()
@@ -1809,44 +1824,51 @@ def try_delete_libs(self, r, *lib_names):
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.1.140")
- def test_tfunction_load_delete(self, r):
- self.try_delete_libs(r, "lib1")
+ def test_tfunction_load_delete(self, stack_r):
+ self.try_delete_libs(stack_r, "lib1")
lib_code = self.generate_lib_code("lib1")
- assert r.tfunction_load(lib_code)
- assert r.tfunction_delete("lib1")
+ assert stack_r.tfunction_load(lib_code)
+ assert stack_r.tfunction_delete("lib1")
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.1.140")
- def test_tfunction_list(self, r):
- self.try_delete_libs(r, "lib1", "lib2", "lib3")
- assert r.tfunction_load(self.generate_lib_code("lib1"))
- assert r.tfunction_load(self.generate_lib_code("lib2"))
- assert r.tfunction_load(self.generate_lib_code("lib3"))
+ def test_tfunction_list(self, stack_r):
+ self.try_delete_libs(stack_r, "lib1", "lib2", "lib3")
+ assert stack_r.tfunction_load(self.generate_lib_code("lib1"))
+ assert stack_r.tfunction_load(self.generate_lib_code("lib2"))
+ assert stack_r.tfunction_load(self.generate_lib_code("lib3"))
# test error thrown when verbose > 4
with pytest.raises(redis.exceptions.DataError):
- assert r.tfunction_list(verbose=8)
+ assert stack_r.tfunction_list(verbose=8)
- functions = r.tfunction_list(verbose=1)
+ functions = stack_r.tfunction_list(verbose=1)
assert len(functions) == 3
expected_names = [b"lib1", b"lib2", b"lib3"]
- actual_names = [functions[0][13], functions[1][13], functions[2][13]]
+ if is_resp2_connection(stack_r):
+ actual_names = [functions[0][13], functions[1][13], functions[2][13]]
+ else:
+ actual_names = [
+ functions[0][b"name"],
+ functions[1][b"name"],
+ functions[2][b"name"],
+ ]
assert sorted(expected_names) == sorted(actual_names)
- assert r.tfunction_delete("lib1")
- assert r.tfunction_delete("lib2")
- assert r.tfunction_delete("lib3")
+ assert stack_r.tfunction_delete("lib1")
+ assert stack_r.tfunction_delete("lib2")
+ assert stack_r.tfunction_delete("lib3")
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.1.140")
- def test_tfcall(self, r):
- self.try_delete_libs(r, "lib1")
- assert r.tfunction_load(self.generate_lib_code("lib1"))
- assert r.tfcall("lib1", "foo") == b"bar"
- assert r.tfcall_async("lib1", "foo") == b"bar"
+ def test_tfcall(self, stack_r):
+ self.try_delete_libs(stack_r, "lib1")
+ assert stack_r.tfunction_load(self.generate_lib_code("lib1"))
+ assert stack_r.tfcall("lib1", "foo") == b"bar"
+ assert stack_r.tfcall_async("lib1", "foo") == b"bar"
- assert r.tfunction_delete("lib1")
+ assert stack_r.tfunction_delete("lib1")
def test_ttl(self, r):
r["a"] = "1"
@@ -2162,6 +2184,19 @@ def test_hscan(self, r):
assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
_, dic = r.hscan("a", match="a")
assert dic == {b"a": b"1"}
+ _, dic = r.hscan("a_notset")
+ assert dic == {}
+
+ @skip_if_server_version_lt("7.3.240")
+ def test_hscan_novalues(self, r):
+ r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ cursor, keys = r.hscan("a", no_values=True)
+ assert cursor == 0
+ assert sorted(keys) == [b"a", b"b", b"c"]
+ _, keys = r.hscan("a", match="a", no_values=True)
+ assert keys == [b"a"]
+ _, keys = r.hscan("a_notset", no_values=True)
+ assert keys == []
@skip_if_server_version_lt("2.8.0")
def test_hscan_iter(self, r):
@@ -2170,6 +2205,18 @@ def test_hscan_iter(self, r):
assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
dic = dict(r.hscan_iter("a", match="a"))
assert dic == {b"a": b"1"}
+ dic = dict(r.hscan_iter("a_notset"))
+ assert dic == {}
+
+ @skip_if_server_version_lt("7.3.240")
+ def test_hscan_iter_novalues(self, r):
+ r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ keys = list(r.hscan_iter("a", no_values=True))
+ assert keys == [b"a", b"b", b"c"]
+ keys = list(r.hscan_iter("a", match="a", no_values=True))
+ assert keys == [b"a"]
+ keys = list(r.hscan_iter("a_notset", no_values=True))
+ assert keys == []
@skip_if_server_version_lt("2.8.0")
def test_zscan(self, r):
@@ -4417,14 +4464,23 @@ def test_xinfo_stream(self, r):
assert info["entries-added"] == 2
assert info["recorded-first-entry-id"] == m1
+ r.xtrim(stream, 0)
+ info = r.xinfo_stream(stream)
+ assert info["length"] == 0
+ assert info["first-entry"] is None
+ assert info["last-entry"] is None
+
@skip_if_server_version_lt("6.0.0")
def test_xinfo_stream_full(self, r):
stream = "stream"
group = "group"
m1 = r.xadd(stream, {"foo": "bar"})
- r.xgroup_create(stream, group, 0)
info = r.xinfo_stream(stream, full=True)
+ assert info["length"] == 1
+ assert len(info["groups"]) == 0
+ r.xgroup_create(stream, group, 0)
+ info = r.xinfo_stream(stream, full=True)
assert info["length"] == 1
assert_resp_response_in(
r,
@@ -4434,6 +4490,11 @@ def test_xinfo_stream_full(self, r):
)
assert len(info["groups"]) == 1
+ r.xreadgroup(group, "consumer", streams={stream: ">"})
+ info = r.xinfo_stream(stream, full=True)
+ consumer = info["groups"][0]["consumers"][0]
+ assert isinstance(consumer, dict)
+
@skip_if_server_version_lt("5.0.0")
def test_xlen(self, r):
stream = "stream"
@@ -4960,6 +5021,9 @@ def test_command_getkeys(self, r):
r, res, ["key1", "key2", "key3"], [b"key1", b"key2", b"key3"]
)
+ # The response to COMMAND contains maps inside sets, which are not handled
+ # by the hiredis-py parser (see https://github.com/redis/hiredis-py/issues/188)
+ @pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only")
@skip_if_server_version_lt("2.8.13")
def test_command(self, r):
res = r.command()
@@ -4988,25 +5052,27 @@ def test_command_getkeysandflags(self, r: redis.Redis):
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("4.0.0")
@skip_if_redis_enterprise()
- def test_module(self, r):
+ def test_module(self, stack_r):
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_load("/some/fake/path")
+ stack_r.module_load("/some/fake/path")
assert "Error loading the extension." in str(excinfo.value)
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_load("/some/fake/path", "arg1", "arg2", "arg3", "arg4")
+ stack_r.module_load("/some/fake/path", "arg1", "arg2", "arg3", "arg4")
assert "Error loading the extension." in str(excinfo.value)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
@skip_if_redis_enterprise()
- def test_module_loadex(self, r: redis.Redis):
+ def test_module_loadex(self, stack_r: redis.Redis):
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_loadex("/some/fake/path")
+ stack_r.module_loadex("/some/fake/path")
assert "Error loading the extension." in str(excinfo.value)
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_loadex("/some/fake/path", ["name", "value"], ["arg1", "arg2"])
+ stack_r.module_loadex(
+ "/some/fake/path", ["name", "value"], ["arg1", "arg2"]
+ )
assert "Error loading the extension." in str(excinfo.value)
@skip_if_server_version_lt("2.6.0")
diff --git a/tests/test_connect.py b/tests/test_connect.py
index ec686540fa..d7ca04b651 100644
--- a/tests/test_connect.py
+++ b/tests/test_connect.py
@@ -58,13 +58,14 @@ def test_uds_connect(uds_address):
)
def test_tcp_ssl_connect(tcp_address, ssl_min_version):
host, port = tcp_address
- certfile = get_ssl_filename("server-cert.pem")
- keyfile = get_ssl_filename("server-key.pem")
+ certfile = get_ssl_filename("client-cert.pem")
+ keyfile = get_ssl_filename("client-key.pem")
+ ca_certfile = get_ssl_filename("ca-cert.pem")
conn = SSLConnection(
host=host,
port=port,
client_name=_CLIENT_NAME,
- ssl_ca_certs=certfile,
+ ssl_ca_certs=ca_certfile,
socket_timeout=10,
ssl_min_version=ssl_min_version,
)
@@ -82,13 +83,14 @@ def test_tcp_ssl_connect(tcp_address, ssl_min_version):
)
def test_tcp_ssl_tls12_custom_ciphers(tcp_address, ssl_ciphers):
host, port = tcp_address
- certfile = get_ssl_filename("server-cert.pem")
- keyfile = get_ssl_filename("server-key.pem")
+ certfile = get_ssl_filename("client-cert.pem")
+ keyfile = get_ssl_filename("client-key.pem")
+ ca_certfile = get_ssl_filename("ca-cert.pem")
conn = SSLConnection(
host=host,
port=port,
client_name=_CLIENT_NAME,
- ssl_ca_certs=certfile,
+ ssl_ca_certs=ca_certfile,
socket_timeout=10,
ssl_min_version=ssl.TLSVersion.TLSv1_2,
ssl_ciphers=ssl_ciphers,
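Both SSL tests now present a proper client certificate pair and validate the server against a dedicated CA bundle instead of pinning the server's own leaf certificate. A sketch of the resulting option split, reusing the `get_ssl_filename` helper these tests already import:

# Sketch: client identity and trust anchor are now separate inputs.
conn_kwargs = dict(
    ssl_certfile=get_ssl_filename("client-cert.pem"),  # who the client is
    ssl_keyfile=get_ssl_filename("client-key.pem"),
    ssl_ca_certs=get_ssl_filename("ca-cert.pem"),      # who vouches for the server
)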
diff --git a/tests/test_connection.py b/tests/test_connection.py
index bff249559e..69275d58c0 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -296,3 +296,53 @@ def mock_disconnect(_):
assert called == 1
pool.disconnect()
+
+
+@pytest.mark.parametrize(
+ "conn, error, expected_message",
+ [
+ (SSLConnection(), OSError(), "Error connecting to localhost:6379."),
+ (SSLConnection(), OSError(12), "Error 12 connecting to localhost:6379."),
+ (
+ SSLConnection(),
+ OSError(12, "Some Error"),
+ "Error 12 connecting to localhost:6379. Some Error.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(),
+ "Error connecting to unix:///tmp/redis.sock.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(12),
+ "Error 12 connecting to unix:///tmp/redis.sock.",
+ ),
+ (
+ UnixDomainSocketConnection(path="unix:///tmp/redis.sock"),
+ OSError(12, "Some Error"),
+ "Error 12 connecting to unix:///tmp/redis.sock. Some Error.",
+ ),
+ ],
+)
+def test_format_error_message(conn, error, expected_message):
+ """Test that the _error_message function formats errors correctly"""
+ error_message = conn._error_message(error)
+ assert error_message == expected_message
+
+
+def test_network_connection_failure():
+ with pytest.raises(ConnectionError) as e:
+ redis = Redis(port=9999)
+ redis.set("a", "b")
+ assert str(e.value) == "Error 111 connecting to localhost:9999. Connection refused."
+
+
+def test_unix_socket_connection_failure():
+ with pytest.raises(ConnectionError) as e:
+ redis = Redis(unix_socket_path="unix:///tmp/a.sock")
+ redis.set("a", "b")
+ assert (
+ str(e.value)
+ == "Error 2 connecting to unix:///tmp/a.sock. No such file or directory."
+ )
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index d1e984ee9c..dee7c554d3 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -95,11 +95,8 @@ def test_repr_contains_db_info_tcp(self):
pool = self.get_pool(
connection_kwargs=connection_kwargs, connection_class=redis.Connection
)
-    expected = (
-        "ConnectionPool<Connection<"
-        "host=localhost,port=6379,db=1,client_name=test-client>>"
-    )
- assert repr(pool) == expected
+ expected = "host=localhost,port=6379,db=1,client_name=test-client"
+ assert expected in repr(pool)
def test_repr_contains_db_info_unix(self):
connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
@@ -107,11 +104,8 @@ def test_repr_contains_db_info_unix(self):
connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection,
)
-    expected = (
-        "ConnectionPool<UnixDomainSocketConnection<"
-        "path=/abc,db=1,client_name=test-client>>"
-    )
- assert repr(pool) == expected
+ expected = "path=/abc,db=1,client_name=test-client"
+ assert expected in repr(pool)
class TestBlockingConnectionPool:
@@ -190,11 +184,8 @@ def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
host="localhost", port=6379, client_name="test-client"
)
-    expected = (
-        "ConnectionPool<Connection<"
-        "host=localhost,port=6379,db=0,client_name=test-client>>"
-    )
- assert repr(pool) == expected
+ expected = "host=localhost,port=6379,db=0,client_name=test-client"
+ assert expected in repr(pool)
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
@@ -202,11 +193,8 @@ def test_repr_contains_db_info_unix(self):
path="abc",
client_name="test-client",
)
-    expected = (
-        "ConnectionPool<UnixDomainSocketConnection<"
-        "path=abc,db=0,client_name=test-client>>"
-    )
- assert repr(pool) == expected
+ expected = "path=abc,db=0,client_name=test-client"
+ assert expected in repr(pool)
class TestConnectionPoolURLParsing:
@@ -579,7 +567,9 @@ def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url("redis://localhost")
pool = connection.connection_pool
- assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ assert re.match(
+ r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
+ ).groups() == (
"ConnectionPool",
"Connection",
"host=localhost,port=6379,db=0",
@@ -589,7 +579,9 @@ def test_connect_from_url_unix(self):
connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
- assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ assert re.match(
+ r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
+ ).groups() == (
"ConnectionPool",
"UnixDomainSocketConnection",
"path=/path/to/socket,db=0",
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 6007de896b..680b8af645 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -20,18 +20,20 @@
QueryResult,
)
from redis.exceptions import ResponseError
-from tests.conftest import _get_client, skip_if_redis_enterprise
+from tests.conftest import _get_client, skip_if_redis_enterprise, skip_if_resp_version
@pytest.fixture
-def client(request):
- r = _get_client(Redis, request, decode_responses=True)
+def client(request, stack_url):
+ r = _get_client(
+ Redis, request, decode_responses=True, from_url="redis://localhost:6480"
+ )
r.flushdb()
return r
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_bulk(client):
with pytest.raises(NotImplementedError):
client.graph().bulk()
@@ -39,7 +41,7 @@ def test_bulk(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_graph_creation(client):
graph = client.graph()
@@ -85,7 +87,7 @@ def test_graph_creation(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_array_functions(client):
query = """CREATE (p:person{name:'a',age:32, array:[0,1,2]})"""
client.graph().query(query)
@@ -107,7 +109,7 @@ def test_array_functions(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_path(client):
node0 = Node(node_id=0, label="L1")
node1 = Node(node_id=1, label="L1")
@@ -128,7 +130,7 @@ def test_path(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_param(client):
params = [1, 2.3, "str", True, False, None, [0, 1, 2], r"\" RETURN 1337 //"]
query = "RETURN $param"
@@ -139,7 +141,7 @@ def test_param(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_map(client):
query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
@@ -157,7 +159,7 @@ def test_map(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_point(client):
query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})"
expected_lat = 32.070794860
@@ -175,7 +177,7 @@ def test_point(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_index_response(client):
result_set = client.graph().query("CREATE INDEX ON :person(age)")
assert 1 == result_set.indices_created
@@ -191,7 +193,7 @@ def test_index_response(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_stringify_query_result(client):
graph = client.graph()
@@ -246,7 +248,7 @@ def test_stringify_query_result(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_optional_match(client):
# Build a graph of form (a)-[R]->(b)
node0 = Node(node_id=0, label="L1", properties={"value": "a"})
@@ -272,7 +274,7 @@ def test_optional_match(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_cached_execution(client):
client.graph().query("CREATE ()")
@@ -291,7 +293,7 @@ def test_cached_execution(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_slowlog(client):
create_query = """CREATE (:Rider
{name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
@@ -305,8 +307,8 @@ def test_slowlog(client):
@pytest.mark.redismod
+@skip_if_resp_version(3)
@pytest.mark.xfail(strict=False)
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
def test_query_timeout(client):
# Build a sample graph with 1000 nodes.
client.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
@@ -321,7 +323,7 @@ def test_query_timeout(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_read_only_query(client):
with pytest.raises(Exception):
# Issue a write query, specifying read-only true,
@@ -331,7 +333,7 @@ def test_read_only_query(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_profile(client):
q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
profile = client.graph().profile(q).result_set
@@ -347,8 +349,8 @@ def test_profile(client):
@pytest.mark.redismod
+@skip_if_resp_version(3)
@skip_if_redis_enterprise()
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
def test_config(client):
config_name = "RESULTSET_SIZE"
config_value = 3
@@ -381,7 +383,7 @@ def test_config(client):
@pytest.mark.onlynoncluster
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_list_keys(client):
result = client.graph().list_keys()
assert result == []
@@ -405,7 +407,7 @@ def test_list_keys(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_multi_label(client):
redis_graph = client.graph("g")
@@ -432,7 +434,7 @@ def test_multi_label(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_cache_sync(client):
pass
return
@@ -506,7 +508,7 @@ def test_cache_sync(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_execution_plan(client):
redis_graph = client.graph("execution_plan")
create_query = """CREATE
@@ -526,7 +528,7 @@ def test_execution_plan(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_explain(client):
redis_graph = client.graph("execution_plan")
# graph creation / population
@@ -616,7 +618,7 @@ def test_explain(client):
@pytest.mark.redismod
-@pytest.mark.skip(reason="Graph module removed from Redis Stack")
+@skip_if_resp_version(3)
def test_resultset_statistics(client):
with patch.object(target=QueryResult, attribute="_get_stat") as mock_get_stats:
result = client.graph().query("RETURN 1")
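Throughout this file a blanket `@pytest.mark.skip` gives way to `@skip_if_resp_version(3)`: the graph tests run again, but only under RESP2. The real helper lives in tests/conftest.py; the following is a hypothetical sketch of its shape (the environment variable and default here are assumptions, not the project's actual implementation):

# Hypothetical sketch of a RESP-version gate.
import os
import pytest

def skip_if_resp_version(resp_version):
    protocol = int(os.environ.get("REDIS_PROTOCOL", "2"))
    return pytest.mark.skipif(
        protocol == resp_version,
        reason=f"Not supported on RESP{resp_version}",
    )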
diff --git a/tests/test_hash.py b/tests/test_hash.py
new file mode 100644
index 0000000000..9ed5e98132
--- /dev/null
+++ b/tests/test_hash.py
@@ -0,0 +1,369 @@
+import time
+from datetime import datetime, timedelta
+
+import pytest
+from tests.conftest import skip_if_server_version_lt
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_basic(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert r.hexpire("test:hash", 1, "field1") == [1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_with_timedelta(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert r.hexpire("test:hash", timedelta(seconds=1), "field1") == [1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ assert r.hexpire("test:hash", 2, "field1", xx=True) == [0]
+ assert r.hexpire("test:hash", 2, "field1", nx=True) == [1]
+ assert r.hexpire("test:hash", 1, "field1", xx=True) == [1]
+ assert r.hexpire("test:hash", 2, "field1", nx=True) == [0]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ r.hset("test:hash", "field1", "value1")
+ r.hexpire("test:hash", 2, "field1")
+ assert r.hexpire("test:hash", 1, "field1", gt=True) == [0]
+ assert r.hexpire("test:hash", 1, "field1", lt=True) == [1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_nonexistent_key_or_field(r):
+ r.delete("test:hash")
+ assert r.hexpire("test:hash", 1, "field1") == [-2]
+ r.hset("test:hash", "field1", "value1")
+ assert r.hexpire("test:hash", 1, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_multiple_fields(r):
+ r.delete("test:hash")
+ r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ assert r.hexpire("test:hash", 1, "field1", "field2") == [1, 1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is False
+ assert r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpire_multiple_condition_flags_error(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ with pytest.raises(ValueError) as e:
+ r.hexpire("test:hash", 1, "field1", nx=True, xx=True)
+ assert "Only one of" in str(e)
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_basic(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert r.hpexpire("test:hash", 500, "field1") == [1]
+ time.sleep(0.6)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_with_timedelta(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ assert r.hpexpire("test:hash", timedelta(milliseconds=500), "field1") == [1]
+ time.sleep(0.6)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ assert r.hpexpire("test:hash", 1500, "field1", xx=True) == [0]
+ assert r.hpexpire("test:hash", 1500, "field1", nx=True) == [1]
+ assert r.hpexpire("test:hash", 500, "field1", xx=True) == [1]
+ assert r.hpexpire("test:hash", 1500, "field1", nx=True) == [0]
+ time.sleep(0.6)
+ assert r.hexists("test:hash", "field1") is False
+ r.hset("test:hash", "field1", "value1")
+ r.hpexpire("test:hash", 1000, "field1")
+ assert r.hpexpire("test:hash", 500, "field1", gt=True) == [0]
+ assert r.hpexpire("test:hash", 500, "field1", lt=True) == [1]
+ time.sleep(0.6)
+ assert r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_nonexistent_key_or_field(r):
+ r.delete("test:hash")
+ assert r.hpexpire("test:hash", 500, "field1") == [-2]
+ r.hset("test:hash", "field1", "value1")
+ assert r.hpexpire("test:hash", 500, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_multiple_fields(r):
+ r.delete("test:hash")
+ r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ assert r.hpexpire("test:hash", 500, "field1", "field2") == [1, 1]
+ time.sleep(0.6)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is False
+ assert r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpire_multiple_condition_flags_error(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ with pytest.raises(ValueError) as e:
+ r.hpexpire("test:hash", 500, "field1", nx=True, xx=True)
+ assert "Only one of" in str(e)
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_basic(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert r.hexpireat("test:hash", exp_time, "field1") == [1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_with_datetime(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = datetime.now() + timedelta(seconds=1)
+ assert r.hexpireat("test:hash", exp_time, "field1") == [1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ future_exp_time = int((datetime.now() + timedelta(seconds=2)).timestamp())
+ past_exp_time = int((datetime.now() - timedelta(seconds=1)).timestamp())
+ assert r.hexpireat("test:hash", future_exp_time, "field1", xx=True) == [0]
+ assert r.hexpireat("test:hash", future_exp_time, "field1", nx=True) == [1]
+ assert r.hexpireat("test:hash", past_exp_time, "field1", gt=True) == [0]
+ assert r.hexpireat("test:hash", past_exp_time, "field1", lt=True) == [2]
+ assert r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_nonexistent_key_or_field(r):
+ r.delete("test:hash")
+ future_exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert r.hexpireat("test:hash", future_exp_time, "field1") == [-2]
+ r.hset("test:hash", "field1", "value1")
+ assert r.hexpireat("test:hash", future_exp_time, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_multiple_fields(r):
+ r.delete("test:hash")
+ r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ assert r.hexpireat("test:hash", exp_time, "field1", "field2") == [1, 1]
+ time.sleep(1.1)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is False
+ assert r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpireat_multiple_condition_flags_error(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ exp_time = int((datetime.now() + timedelta(seconds=1)).timestamp())
+ with pytest.raises(ValueError) as e:
+ r.hexpireat("test:hash", exp_time, "field1", nx=True, xx=True)
+ assert "Only one of" in str(e)
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_basic(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = int((datetime.now() + timedelta(milliseconds=400)).timestamp() * 1000)
+ assert r.hpexpireat("test:hash", exp_time, "field1") == [1]
+ time.sleep(0.5)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_with_datetime(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ exp_time = datetime.now() + timedelta(milliseconds=400)
+ assert r.hpexpireat("test:hash", exp_time, "field1") == [1]
+ time.sleep(0.5)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ future_exp_time = int(
+ (datetime.now() + timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ past_exp_time = int(
+ (datetime.now() - timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ assert r.hpexpireat("test:hash", future_exp_time, "field1", xx=True) == [0]
+ assert r.hpexpireat("test:hash", future_exp_time, "field1", nx=True) == [1]
+ assert r.hpexpireat("test:hash", past_exp_time, "field1", gt=True) == [0]
+ assert r.hpexpireat("test:hash", past_exp_time, "field1", lt=True) == [2]
+ assert r.hexists("test:hash", "field1") is False
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_nonexistent_key_or_field(r):
+ r.delete("test:hash")
+ future_exp_time = int(
+ (datetime.now() + timedelta(milliseconds=500)).timestamp() * 1000
+ )
+ assert r.hpexpireat("test:hash", future_exp_time, "field1") == [-2]
+ r.hset("test:hash", "field1", "value1")
+ assert r.hpexpireat("test:hash", future_exp_time, "nonexistent_field") == [-2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_multiple_fields(r):
+ r.delete("test:hash")
+ r.hset(
+ "test:hash",
+ mapping={"field1": "value1", "field2": "value2", "field3": "value3"},
+ )
+ exp_time = int((datetime.now() + timedelta(milliseconds=400)).timestamp() * 1000)
+ assert r.hpexpireat("test:hash", exp_time, "field1", "field2") == [1, 1]
+ time.sleep(0.5)
+ assert r.hexists("test:hash", "field1") is False
+ assert r.hexists("test:hash", "field2") is False
+ assert r.hexists("test:hash", "field3") is True
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpireat_multiple_condition_flags_error(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1"})
+ exp_time = int((datetime.now() + timedelta(milliseconds=500)).timestamp())
+ with pytest.raises(ValueError) as e:
+ r.hpexpireat("test:hash", exp_time, "field1", nx=True, xx=True)
+ assert "Only one of" in str(e)
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpersist_multiple_fields(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ r.hexpire("test:hash", 5000, "field1")
+ assert r.hpersist("test:hash", "field1", "field2", "field3") == [1, -1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpersist_nonexistent_key(r):
+ r.delete("test:hash")
+ assert r.hpersist("test:hash", "field1", "field2", "field3") == [-2, -2, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpiretime_multiple_fields_mixed_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ r.hexpireat("test:hash", future_time, "field1")
+ result = r.hexpiretime("test:hash", "field1", "field2", "field3")
+ assert future_time - 10 < result[0] <= future_time
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hexpiretime_nonexistent_key(r):
+ r.delete("test:hash")
+ assert r.hexpiretime("test:hash", "field1", "field2", "field3") == [-2, -2, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpiretime_multiple_fields_mixed_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ r.hexpireat("test:hash", future_time, "field1")
+ result = r.hpexpiretime("test:hash", "field1", "field2", "field3")
+ assert future_time * 1000 - 10000 < result[0] <= future_time * 1000
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpexpiretime_nonexistent_key(r):
+ r.delete("test:hash")
+ assert r.hpexpiretime("test:hash", "field1", "field2", "field3") == [-2, -2, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_httl_multiple_fields_mixed_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ r.hexpireat("test:hash", future_time, "field1")
+ result = r.httl("test:hash", "field1", "field2", "field3")
+ assert 30 * 60 - 10 < result[0] <= 30 * 60
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_httl_nonexistent_key(r):
+ r.delete("test:hash")
+ assert r.httl("test:hash", "field1", "field2", "field3") == [-2, -2, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpttl_multiple_fields_mixed_conditions(r):
+ r.delete("test:hash")
+ r.hset("test:hash", mapping={"field1": "value1", "field2": "value2"})
+ future_time = int((datetime.now() + timedelta(minutes=30)).timestamp())
+ r.hexpireat("test:hash", future_time, "field1")
+ result = r.hpttl("test:hash", "field1", "field2", "field3")
+ assert 30 * 60000 - 10000 < result[0] <= 30 * 60000
+ assert result[1:] == [-1, -2]
+
+
+@skip_if_server_version_lt("7.3.240")
+def test_hpttl_nonexistent_key(r):
+ r.delete("test:hash")
+ assert r.hpttl("test:hash", "field1", "field2", "field3") == [-2, -2, -2]
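The new suite pins down the per-field reply codes shared by the whole hash-TTL family. A compact reference sketch, with code meanings inferred from the assertions above:

# Per-field reply codes exercised in tests/test_hash.py:
#    2 -> expiry already in the past; field deleted immediately
#    1 -> TTL set or updated
#    0 -> nx/xx/gt/lt condition not met
#   -1 -> field exists but carries no TTL (HTTL/HPERSIST family)
#   -2 -> key or field does not exist
r.hset("h", mapping={"f1": "v1", "f2": "v2"})
assert r.hexpire("h", 60, "f1") == [1]
ttls = r.httl("h", "f1", "f2", "missing")
assert 0 < ttls[0] <= 60 and ttls[1:] == [-1, -2]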
diff --git a/tests/test_json.py b/tests/test_json.py
index 00f7e2fce1..f4cea73787 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -8,12 +8,13 @@
@pytest.fixture
-def client(request):
- r = _get_client(Redis, request, decode_responses=True)
+def client(request, stack_url):
+ r = _get_client(Redis, request, decode_responses=True, from_url=stack_url)
r.flushdb()
return r
+@pytest.mark.redismod
def test_json_setbinarykey(client):
d = {"hello": "world", b"some": "value"}
with pytest.raises(TypeError):
@@ -21,28 +22,32 @@ def test_json_setbinarykey(client):
assert client.json().set("somekey", Path.root_path(), d, decode_keys=True)
+@pytest.mark.redismod
def test_json_setgetdeleteforget(client):
assert client.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(client, client.json().get("foo"), "bar", [["bar"]])
+ assert client.json().get("foo") == "bar"
assert client.json().get("baz") is None
assert client.json().delete("foo") == 1
assert client.json().forget("foo") == 0 # second delete
assert client.exists("foo") == 0
+@pytest.mark.redismod
def test_jsonget(client):
client.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(client, client.json().get("foo"), "bar", [["bar"]])
+ assert client.json().get("foo") == "bar"
+@pytest.mark.redismod
def test_json_get_jset(client):
assert client.json().set("foo", Path.root_path(), "bar")
- assert_resp_response(client, client.json().get("foo"), "bar", [["bar"]])
+ assert client.json().get("foo") == "bar"
assert client.json().get("baz") is None
assert 1 == client.json().delete("foo")
assert client.exists("foo") == 0
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.06.00", "ReJSON") # todo: update after the release
def test_json_merge(client):
# Test with root path $
@@ -75,16 +80,15 @@ def test_json_merge(client):
}
+@pytest.mark.redismod
def test_nonascii_setgetdelete(client):
assert client.json().set("notascii", Path.root_path(), "hyvää-élève")
- res = "hyvää-élève"
- assert_resp_response(
- client, client.json().get("notascii", no_escape=True), res, [[res]]
- )
+ assert client.json().get("notascii", no_escape=True) == "hyvää-élève"
assert 1 == client.json().delete("notascii")
assert client.exists("notascii") == 0
+@pytest.mark.redismod
def test_jsonsetexistentialmodifiersshouldsucceed(client):
obj = {"foo": "bar"}
assert client.json().set("obj", Path.root_path(), obj)
@@ -97,12 +101,12 @@ def test_jsonsetexistentialmodifiersshouldsucceed(client):
assert client.json().set("obj", Path("foo"), "baz", xx=True)
assert client.json().set("obj", Path("qaz"), "baz", nx=True)
- # Test that flags are mutually exlusive
+ # Test that flags are mutually exclusive
with pytest.raises(Exception):
client.json().set("obj", Path("foo"), "baz", nx=True, xx=True)
-@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_mgetshouldsucceed(client):
client.json().set("1", Path.root_path(), 1)
client.json().set("2", Path.root_path(), 2)
@@ -112,6 +116,7 @@ def test_mgetshouldsucceed(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("2.06.00", "ReJSON")
def test_mset(client):
client.json().mset([("1", Path.root_path(), 1), ("2", Path.root_path(), 2)])
@@ -120,13 +125,15 @@ def test_mset(client):
assert client.json().mget(["1", "2"], Path.root_path()) == [1, 2]
+@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
def test_clear(client):
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 1 == client.json().clear("arr", Path.root_path())
- assert_resp_response(client, client.json().get("arr"), [], [[[]]])
+ assert_resp_response(client, client.json().get("arr"), [], [])
+@pytest.mark.redismod
def test_type(client):
client.json().set("1", Path.root_path(), 1)
assert_resp_response(
@@ -135,6 +142,7 @@ def test_type(client):
assert_resp_response(client, client.json().type("1"), "integer", ["integer"])
+@pytest.mark.redismod
def test_numincrby(client):
client.json().set("num", Path.root_path(), 1)
assert_resp_response(
@@ -148,6 +156,7 @@ def test_numincrby(client):
)
+@pytest.mark.redismod
def test_nummultby(client):
client.json().set("num", Path.root_path(), 1)
@@ -163,6 +172,7 @@ def test_nummultby(client):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
def test_toggle(client):
client.json().set("bool", Path.root_path(), False)
@@ -174,23 +184,14 @@ def test_toggle(client):
client.json().toggle("num", Path.root_path())
+@pytest.mark.redismod
def test_strappend(client):
client.json().set("jsonkey", Path.root_path(), "foo")
assert 6 == client.json().strappend("jsonkey", "bar")
- assert_resp_response(
- client, client.json().get("jsonkey", Path.root_path()), "foobar", [["foobar"]]
- )
-
-
-# # def test_debug(client):
-# client.json().set("str", Path.root_path(), "foo")
-# assert 24 == client.json().debug("MEMORY", "str", Path.root_path())
-# assert 24 == client.json().debug("MEMORY", "str")
-#
-# # technically help is valid
-# assert isinstance(client.json().debug("HELP"), list)
+ assert "foobar" == client.json().get("jsonkey", Path.root_path())
+@pytest.mark.redismod
def test_strlen(client):
client.json().set("str", Path.root_path(), "foo")
assert 3 == client.json().strlen("str", Path.root_path())
@@ -199,6 +200,7 @@ def test_strlen(client):
assert 6 == client.json().strlen("str")
+@pytest.mark.redismod
def test_arrappend(client):
client.json().set("arr", Path.root_path(), [1])
assert 2 == client.json().arrappend("arr", Path.root_path(), 2)
@@ -206,6 +208,7 @@ def test_arrappend(client):
assert 7 == client.json().arrappend("arr", Path.root_path(), *[5, 6, 7])
+@pytest.mark.redismod
def test_arrindex(client):
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 1 == client.json().arrindex("arr", Path.root_path(), 1)
@@ -217,19 +220,19 @@ def test_arrindex(client):
assert -1 == client.json().arrindex("arr", Path.root_path(), 4, start=1, stop=3)
+@pytest.mark.redismod
def test_arrinsert(client):
client.json().set("arr", Path.root_path(), [0, 4])
assert 5 == client.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
- res = [0, 1, 2, 3, 4]
- assert_resp_response(client, client.json().get("arr"), res, [[res]])
+ assert client.json().get("arr") == [0, 1, 2, 3, 4]
# test prepends
client.json().set("val2", Path.root_path(), [5, 6, 7, 8, 9])
client.json().arrinsert("val2", Path.root_path(), 0, ["some", "thing"])
- res = [["some", "thing"], 5, 6, 7, 8, 9]
- assert_resp_response(client, client.json().get("val2"), res, [[res]])
+ assert client.json().get("val2") == [["some", "thing"], 5, 6, 7, 8, 9]
+@pytest.mark.redismod
def test_arrlen(client):
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 5 == client.json().arrlen("arr", Path.root_path())
@@ -237,13 +240,14 @@ def test_arrlen(client):
assert client.json().arrlen("fakekey") is None
+@pytest.mark.redismod
def test_arrpop(client):
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 4 == client.json().arrpop("arr", Path.root_path(), 4)
assert 3 == client.json().arrpop("arr", Path.root_path(), -1)
assert 2 == client.json().arrpop("arr", Path.root_path())
assert 0 == client.json().arrpop("arr", Path.root_path(), 0)
- assert_resp_response(client, client.json().get("arr"), [1], [[[1]]])
+ assert [1] == client.json().get("arr")
# test out of bounds
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
@@ -254,10 +258,11 @@ def test_arrpop(client):
assert client.json().arrpop("arr") is None
+@pytest.mark.redismod
def test_arrtrim(client):
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
assert 3 == client.json().arrtrim("arr", Path.root_path(), 1, 3)
- assert_resp_response(client, client.json().get("arr"), [1, 2, 3], [[[1, 2, 3]]])
+ assert [1, 2, 3] == client.json().get("arr")
# <0 test, should be 0 equivalent
client.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4])
@@ -276,6 +281,7 @@ def test_arrtrim(client):
assert 0 == client.json().arrtrim("arr", Path.root_path(), 9, 11)
+@pytest.mark.redismod
def test_resp(client):
obj = {"foo": "bar", "baz": 1, "qaz": True}
client.json().set("obj", Path.root_path(), obj)
@@ -285,6 +291,7 @@ def test_resp(client):
assert isinstance(client.json().resp("obj"), list)
+@pytest.mark.redismod
def test_objkeys(client):
obj = {"foo": "bar", "baz": "qaz"}
client.json().set("obj", Path.root_path(), obj)
@@ -301,6 +308,7 @@ def test_objkeys(client):
assert client.json().objkeys("fakekey") is None
+@pytest.mark.redismod
def test_objlen(client):
obj = {"foo": "bar", "baz": "qaz"}
client.json().set("obj", Path.root_path(), obj)
@@ -310,12 +318,13 @@ def test_objlen(client):
assert len(obj) == client.json().objlen("obj")
+@pytest.mark.redismod
def test_json_commands_in_pipeline(client):
p = client.json().pipeline()
p.set("foo", Path.root_path(), "bar")
p.get("foo")
p.delete("foo")
- assert_resp_response(client, p.execute(), [True, "bar", 1], [True, [["bar"]], 1])
+ assert p.execute() == [True, "bar", 1]
assert client.keys() == []
assert client.get("foo") is None
@@ -328,23 +337,24 @@ def test_json_commands_in_pipeline(client):
p.jsonget("foo")
p.exists("notarealkey")
p.delete("foo")
- assert_resp_response(client, p.execute(), [True, d, 0, 1], [True, [[d]], 0, 1])
+ assert p.execute() == [True, d, 0, 1]
assert client.keys() == []
assert client.get("foo") is None
+@pytest.mark.redismod
def test_json_delete_with_dollar(client):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
assert client.json().set("doc1", "$", doc1)
assert client.json().delete("doc1", "$..a") == 2
res = [{"nested": {"b": 3}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert client.json().set("doc2", "$", doc2)
assert client.json().delete("doc2", "$..a") == 1
res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(client, client.json().get("doc2", "$"), res, [res])
+ assert client.json().get("doc2", "$") == res
doc3 = [
{
@@ -375,7 +385,7 @@ def test_json_delete_with_dollar(client):
}
]
]
- assert_resp_response(client, client.json().get("doc3", "$"), doc3val, [doc3val])
+ assert client.json().get("doc3", "$") == doc3val
# Test default path
assert client.json().delete("doc3") == 1
@@ -384,18 +394,19 @@ def test_json_delete_with_dollar(client):
client.json().delete("not_a_document", "..a")
+@pytest.mark.redismod
def test_json_forget_with_dollar(client):
doc1 = {"a": 1, "nested": {"a": 2, "b": 3}}
assert client.json().set("doc1", "$", doc1)
assert client.json().forget("doc1", "$..a") == 2
res = [{"nested": {"b": 3}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert client.json().set("doc2", "$", doc2)
assert client.json().forget("doc2", "$..a") == 1
res = [{"nested": {"b": [True, "a", "b"]}, "b": ["a", "b"]}]
- assert_resp_response(client, client.json().get("doc2", "$"), res, [res])
+ assert client.json().get("doc2", "$") == res
doc3 = [
{
@@ -426,7 +437,7 @@ def test_json_forget_with_dollar(client):
}
]
]
- assert_resp_response(client, client.json().get("doc3", "$"), doc3val, [doc3val])
+ assert client.json().get("doc3", "$") == doc3val
# Test default path
assert client.json().forget("doc3") == 1
@@ -435,6 +446,7 @@ def test_json_forget_with_dollar(client):
client.json().forget("not_a_document", "..a")
+@pytest.mark.redismod
def test_json_mget_dollar(client):
# Test mget with multi paths
client.json().set(
@@ -449,21 +461,22 @@ def test_json_mget_dollar(client):
)
# Compare also to single JSON.GET
res = [1, 3, None]
- assert_resp_response(client, client.json().get("doc1", "$..a"), res, [res])
+ assert client.json().get("doc1", "$..a") == res
res = [4, 6, [None]]
- assert_resp_response(client, client.json().get("doc2", "$..a"), res, [res])
+ assert client.json().get("doc2", "$..a") == res
# Test mget with single path
- client.json().mget("doc1", "$..a") == [1, 3, None]
+ assert client.json().mget(["doc1"], "$..a") == [[1, 3, None]]
# Test mget with multi path
- client.json().mget(["doc1", "doc2"], "$..a") == [[1, 3, None], [4, 6, [None]]]
+ res = [[1, 3, None], [4, 6, [None]]]
+ assert client.json().mget(["doc1", "doc2"], "$..a") == res
# Test missing key
- client.json().mget(["doc1", "missing_doc"], "$..a") == [[1, 3, None], None]
- res = client.json().mget(["missing_doc1", "missing_doc2"], "$..a")
- assert res == [None, None]
+ assert client.json().mget(["doc1", "missing_doc"], "$..a") == [[1, 3, None], None]
+ assert client.json().mget(["missing_doc1", "missing_doc2"], "$..a") == [None, None]
+@pytest.mark.redismod
def test_numby_commands_dollar(client):
# Test NUMINCRBY
client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
@@ -498,45 +511,49 @@ def test_numby_commands_dollar(client):
# Test legacy NUMINCRBY
client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- client.json().numincrby("doc1", ".b[0].a", 3) == 5
+ assert_resp_response(client, client.json().numincrby("doc1", ".b[0].a", 3), 5, [5])
# Test legacy NUMMULTBY
client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
with pytest.deprecated_call():
- client.json().nummultby("doc1", ".b[0].a", 3) == 6
+ assert_resp_response(
+ client, client.json().nummultby("doc1", ".b[0].a", 3), 6, [6]
+ )
+@pytest.mark.redismod
def test_strappend_dollar(client):
client.json().set(
"doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
# Test multi
- client.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
+ assert client.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
- # res = [{"a": "foobar", "nested1": {"a": "hellobar"}, "nested2": {"a": 31}}]
- # assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ res = [{"a": "foobar", "nested1": {"a": "hellobar"}, "nested2": {"a": 31}}]
+ assert_resp_response(client, client.json().get("doc1", "$"), res, res)
# Test single
- client.json().strappend("doc1", "baz", "$.nested1.a") == [11]
+ assert client.json().strappend("doc1", "baz", "$.nested1.a") == [11]
- # res = [{"a": "foobar", "nested1": {"a": "hellobarbaz"}, "nested2": {"a": 31}}]
- # assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ res = [{"a": "foobar", "nested1": {"a": "hellobarbaz"}, "nested2": {"a": 31}}]
+ assert_resp_response(client, client.json().get("doc1", "$"), res, res)
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().strappend("non_existing_doc", "$..a", "err")
# Test multi
- client.json().strappend("doc1", "bar", ".*.a") == 8
- # res = [{"a": "foo", "nested1": {"a": "hellobar"}, "nested2": {"a": 31}}]
- # assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().strappend("doc1", "bar", ".*.a") == 14
+ res = [{"a": "foobar", "nested1": {"a": "hellobarbazbar"}, "nested2": {"a": 31}}]
+ assert_resp_response(client, client.json().get("doc1", "$"), res, res)
# Test missing path
with pytest.raises(exceptions.ResponseError):
client.json().strappend("doc1", "piu")
+@pytest.mark.redismod
def test_strlen_dollar(client):
# Test multi
client.json().set(
@@ -549,14 +566,15 @@ def test_strlen_dollar(client):
assert res1 == res2
# Test single
- client.json().strlen("doc1", "$.nested1.a") == [8]
- client.json().strlen("doc1", "$.nested2.a") == [None]
+ assert client.json().strlen("doc1", "$.nested1.a") == [8]
+ assert client.json().strlen("doc1", "$.nested2.a") == [None]
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().strlen("non_existing_doc", "$..a")
+@pytest.mark.redismod
def test_arrappend_dollar(client):
client.json().set(
"doc1",
@@ -568,7 +586,7 @@ def test_arrappend_dollar(client):
},
)
# Test multi
- client.json().arrappend("doc1", "$..a", "bar", "racuda") == [3, 5, None]
+ assert client.json().arrappend("doc1", "$..a", "bar", "racuda") == [3, 5, None]
res = [
{
"a": ["foo", "bar", "racuda"],
@@ -576,7 +594,7 @@ def test_arrappend_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test single
assert client.json().arrappend("doc1", "$.nested1.a", "baz") == [6]
@@ -587,7 +605,7 @@ def test_arrappend_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -613,7 +631,7 @@ def test_arrappend_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test single
assert client.json().arrappend("doc1", ".nested1.a", "baz") == 6
@@ -624,13 +642,14 @@ def test_arrappend_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().arrappend("non_existing_doc", "$..a")
+@pytest.mark.redismod
def test_arrinsert_dollar(client):
client.json().set(
"doc1",
@@ -651,7 +670,7 @@ def test_arrinsert_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test single
assert client.json().arrinsert("doc1", "$.nested1.a", -2, "baz") == [6]
@@ -662,13 +681,14 @@ def test_arrinsert_dollar(client):
"nested2": {"a": 31},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().arrappend("non_existing_doc", "$..a")
+@pytest.mark.redismod
def test_arrlen_dollar(client):
client.json().set(
"doc1",
@@ -717,6 +737,7 @@ def test_arrlen_dollar(client):
assert client.json().arrlen("non_existing_doc", "..a") is None
+@pytest.mark.redismod
def test_arrpop_dollar(client):
client.json().set(
"doc1",
@@ -732,7 +753,7 @@ def test_arrpop_dollar(client):
assert client.json().arrpop("doc1", "$..a", 1) == ['"foo"', None, None]
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -749,15 +770,16 @@ def test_arrpop_dollar(client):
},
)
# Test multi (all paths are updated, but return result of last path)
- client.json().arrpop("doc1", "..a", "1") is None
+ assert client.json().arrpop("doc1", "..a", "1") == "null"
res = [{"a": [], "nested1": {"a": ["hello", "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# # Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().arrpop("non_existing_doc", "..a")
+@pytest.mark.redismod
def test_arrtrim_dollar(client):
client.json().set(
"doc1",
@@ -771,16 +793,16 @@ def test_arrtrim_dollar(client):
# Test multi
assert client.json().arrtrim("doc1", "$..a", "1", -1) == [0, 2, None]
res = [{"a": [], "nested1": {"a": [None, "world"]}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
assert client.json().arrtrim("doc1", "$..a", "1", "1") == [0, 1, None]
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test single
assert client.json().arrtrim("doc1", "$.nested1.a", 1, 0) == [0]
res = [{"a": [], "nested1": {"a": []}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
@@ -803,13 +825,14 @@ def test_arrtrim_dollar(client):
# Test single
assert client.json().arrtrim("doc1", ".nested1.a", "1", "1") == 1
res = [{"a": [], "nested1": {"a": ["world"]}, "nested2": {"a": 31}}]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().arrtrim("non_existing_doc", "..a", 1, 1)
+@pytest.mark.redismod
def test_objkeys_dollar(client):
client.json().set(
"doc1",
@@ -839,6 +862,7 @@ def test_objkeys_dollar(client):
assert client.json().objkeys("doc1", "$..nowhere") == []
+@pytest.mark.redismod
def test_objlen_dollar(client):
client.json().set(
"doc1",
@@ -893,6 +917,7 @@ def load_types_data(nested_key_name):
return jdata, types
+@pytest.mark.redismod
def test_type_dollar(client):
jdata, jtypes = load_types_data("a")
client.json().set("doc1", "$", jdata)
@@ -910,6 +935,7 @@ def test_type_dollar(client):
)
+@pytest.mark.redismod
def test_clear_dollar(client):
client.json().set(
"doc1",
@@ -927,7 +953,7 @@ def test_clear_dollar(client):
res = [
{"nested1": {"a": {}}, "a": [], "nested2": {"a": "claro"}, "nested3": {"a": {}}}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test single
client.json().set(
@@ -949,17 +975,18 @@ def test_clear_dollar(client):
"nested3": {"a": {"baz": 50}},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing path (defaults to root)
assert client.json().clear("doc1") == 1
- assert_resp_response(client, client.json().get("doc1", "$"), [{}], [[{}]])
+ assert client.json().get("doc1", "$") == [{}]
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().clear("non_existing_doc", "$..a")
+@pytest.mark.redismod
def test_toggle_dollar(client):
client.json().set(
"doc1",
@@ -981,35 +1008,14 @@ def test_toggle_dollar(client):
"nested3": {"a": False},
}
]
- assert_resp_response(client, client.json().get("doc1", "$"), res, [res])
+ assert client.json().get("doc1", "$") == res
# Test missing key
with pytest.raises(exceptions.ResponseError):
client.json().toggle("non_existing_doc", "$..a")
-# # def test_debug_dollar(client):
-#
-# jdata, jtypes = load_types_data("a")
-#
-# client.json().set("doc1", "$", jdata)
-#
-# # Test multi
-# assert client.json().debug("MEMORY", "doc1", "$..a") == [72, 24, 24, 16, 16, 1, 0]
-#
-# # Test single
-# assert client.json().debug("MEMORY", "doc1", "$.nested2.a") == [24]
-#
-# # Test legacy
-# assert client.json().debug("MEMORY", "doc1", "..a") == 72
-#
-# # Test missing path (defaults to root)
-# assert client.json().debug("MEMORY", "doc1") == 72
-#
-# # Test missing key
-# assert client.json().debug("MEMORY", "non_existing_doc", "$..a") == []
-
-
+@pytest.mark.redismod
def test_resp_dollar(client):
data = {
"L1": {
@@ -1238,6 +1244,7 @@ def test_resp_dollar(client):
client.json().resp("non_existing_doc", "$..a")
+@pytest.mark.redismod
def test_arrindex_dollar(client):
client.json().set(
"store",
@@ -1281,12 +1288,10 @@ def test_arrindex_dollar(client):
},
)
- assert_resp_response(
- client,
- client.json().get("store", "$.store.book[?(@.price<10)].size"),
- [[10, 20, 30, 40], [5, 10, 20, 30]],
- [[[10, 20, 30, 40], [5, 10, 20, 30]]],
- )
+ assert client.json().get("store", "$.store.book[?(@.price<10)].size") == [
+ [10, 20, 30, 40],
+ [5, 10, 20, 30],
+ ]
assert client.json().arrindex(
"store", "$.store.book[?(@.price<10)].size", "20"
@@ -1315,7 +1320,7 @@ def test_arrindex_dollar(client):
"3",
[],
]
- assert_resp_response(client, client.json().get("test_num", "$..arr"), res, [res])
+ assert client.json().get("test_num", "$..arr") == res
assert client.json().arrindex("test_num", "$..arr", 3) == [3, 2, -1, None, -1]
@@ -1348,7 +1353,7 @@ def test_arrindex_dollar(client):
"3",
[],
]
- assert_resp_response(client, client.json().get("test_string", "$..arr"), res, [res])
+ assert client.json().get("test_string", "$..arr") == res
assert client.json().arrindex("test_string", "$..arr", "baz") == [
3,
@@ -1441,7 +1446,7 @@ def test_arrindex_dollar(client):
None,
[],
]
- assert_resp_response(client, client.json().get("test_None", "$..arr"), res, [res])
+ assert client.json().get("test_None", "$..arr") == res
# Test with non-scalar value
assert client.json().arrindex(
@@ -1463,6 +1468,7 @@ def test_arrindex_dollar(client):
assert client.json().arrindex("test_None", "..nested2_not_found.arr", "None") == 0
+@pytest.mark.redismod
def test_decoders_and_unstring():
assert unstring("4") == 4
assert unstring("45.55") == 45.55
@@ -1473,6 +1479,7 @@ def test_decoders_and_unstring():
assert decode_list(["hello", b"world"]) == ["hello", "world"]
+@pytest.mark.redismod
def test_custom_decoder(client):
import json
@@ -1480,7 +1487,7 @@ def test_custom_decoder(client):
cj = client.json(encoder=ujson, decoder=ujson)
assert cj.set("foo", Path.root_path(), "bar")
- assert_resp_response(client, cj.get("foo"), "bar", [["bar"]])
+ assert cj.get("foo") == "bar"
assert cj.get("baz") is None
assert 1 == cj.delete("foo")
assert client.exists("foo") == 0
@@ -1488,6 +1495,7 @@ def test_custom_decoder(client):
assert not isinstance(cj.__decoder__, json.JSONDecoder)
+@pytest.mark.redismod
def test_set_file(client):
import json
import tempfile
@@ -1501,11 +1509,12 @@ def test_set_file(client):
nojsonfile.write(b"Hello World")
assert client.json().set_file("test", Path.root_path(), jsonfile.name)
- assert_resp_response(client, client.json().get("test"), obj, [[obj]])
+ assert client.json().get("test") == obj
with pytest.raises(json.JSONDecodeError):
client.json().set_file("test2", Path.root_path(), nojsonfile.name)
+@pytest.mark.redismod
def test_set_path(client):
import json
import tempfile
@@ -1523,6 +1532,4 @@ def test_set_path(client):
result = {jsonfile: True, nojsonfile: False}
assert client.json().set_path(Path.root_path(), root) == result
res = {"hello": "world"}
- assert_resp_response(
- client, client.json().get(jsonfile.rsplit(".")[0]), res, [[res]]
- )
+ assert client.json().get(jsonfile.rsplit(".")[0]) == res
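All of the json hunks above make the same move: assert_resp_response(client, actual, resp2_expected, resp3_expected) collapses into a plain equality check, which is only safe once both protocols hand back the same shape. For orientation, a sketch of what the retired helper presumably does; the real implementation lives in tests/conftest.py and this version is inferred from the call sites rather than copied from it:

def assert_resp_response(client, response, resp2_expected, resp3_expected):
    # RESP2 and RESP3 servers shape module replies differently (e.g. [res]
    # versus [[res]] for JSON.GET), so the old assertions carried both forms.
    if is_resp2_connection(client):  # helper imported from tests/conftest.py
        assert response == resp2_expected
    else:
        assert response == resp3_expected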
diff --git a/tests/test_retry.py b/tests/test_retry.py
index e9d3015897..183807386d 100644
--- a/tests/test_retry.py
+++ b/tests/test_retry.py
@@ -1,7 +1,7 @@
from unittest.mock import patch
import pytest
-from redis.backoff import ExponentialBackoff, NoBackoff
+from redis.backoff import AbstractBackoff, ExponentialBackoff, NoBackoff
from redis.client import Redis
from redis.connection import Connection, UnixDomainSocketConnection
from redis.exceptions import (
@@ -15,7 +15,7 @@
from .conftest import _get_client
-class BackoffMock:
+class BackoffMock(AbstractBackoff):
def __init__(self):
self.reset_calls = 0
self.calls = 0
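Making BackoffMock inherit AbstractBackoff is more than cosmetic: redis.retry.Retry drives whatever backoff it is handed through reset() and compute(failures), and subclassing the ABC makes that contract explicit (and fails loudly if a method is missing). A minimal conforming mock, sketched against the interface as redis/backoff.py defines it; the class name here is illustrative:

from redis.backoff import AbstractBackoff
from redis.retry import Retry


class CountingBackoff(AbstractBackoff):
    """Backoff that records calls and never sleeps."""

    def __init__(self):
        self.reset_calls = 0
        self.calls = 0

    def reset(self):
        self.reset_calls += 1

    def compute(self, failures):
        self.calls += 1
        return 0  # zero delay between attempts


retry = Retry(CountingBackoff(), retries=3)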
diff --git a/tests/test_search.py b/tests/test_search.py
index bfe204254c..13ee877eaf 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -4,6 +4,7 @@
import time
from io import TextIOWrapper
+import numpy as np
import pytest
import redis
import redis.commands.search
@@ -29,6 +30,7 @@
assert_resp_response,
is_resp2_connection,
skip_if_redis_enterprise,
+ skip_if_resp_version,
skip_ifmodversion_lt,
)
@@ -107,8 +109,8 @@ def createIndex(client, num_docs=100, definition=None):
@pytest.fixture
-def client(request):
- r = _get_client(redis.Redis, request, decode_responses=True)
+def client(request, stack_url):
+ r = _get_client(redis.Redis, request, decode_responses=True, from_url=stack_url)
r.flushdb()
return r
@@ -1707,6 +1709,54 @@ def test_search_return_fields(client):
assert "telmatosaurus" == total["results"][0]["extra_attributes"]["txt"]
+@pytest.mark.redismod
+@skip_if_resp_version(3)
+def test_binary_and_text_fields(client):
+ fake_vec = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
+
+ index_name = "mixed_index"
+ mixed_data = {"first_name": "🐍python", "vector_emb": fake_vec.tobytes()}
+ client.hset(f"{index_name}:1", mapping=mixed_data)
+
+ schema = (
+ TagField("first_name"),
+ VectorField(
+ "embeddings_bio",
+ algorithm="HNSW",
+ attributes={
+ "TYPE": "FLOAT32",
+ "DIM": 4,
+ "DISTANCE_METRIC": "COSINE",
+ },
+ ),
+ )
+
+ client.ft(index_name).create_index(
+ fields=schema,
+ definition=IndexDefinition(
+ prefix=[f"{index_name}:"], index_type=IndexType.HASH
+ ),
+ )
+
+ query = (
+ Query("*")
+ .return_field("vector_emb", decode_field=False)
+ .return_field("first_name")
+ )
+ docs = client.ft(index_name).search(query=query, query_params={}).docs
+ decoded_vec_from_search_results = np.frombuffer(
+ docs[0]["vector_emb"], dtype=np.float32
+ )
+
+ assert np.array_equal(
+ decoded_vec_from_search_results, fake_vec
+ ), "The vectors are not equal"
+
+ assert (
+ docs[0]["first_name"] == mixed_data["first_name"]
+ ), "The text field is not decoded correctly"
+
+
@pytest.mark.redismod
def test_synupdate(client):
definition = IndexDefinition(index_type=IndexType.HASH)
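The binary-field test added above hinges on numpy's raw-buffer serialization: tobytes() dumps the array's contents as packed float32 bytes, and frombuffer() reconstructs it as long as the dtype is restated (shape too, for anything multi-dimensional). The invariant in isolation, independent of Redis:

import numpy as np

vec = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
raw = vec.tobytes()                              # 4 floats * 4 bytes = 16 bytes
assert len(raw) == vec.size * vec.itemsize
restored = np.frombuffer(raw, dtype=np.float32)  # dtype must be restated
assert np.array_equal(vec, restored)             # lossless round trip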
@@ -2107,16 +2157,60 @@ def test_geo_params(client):
params_dict = {"lat": "34.95126", "lon": "29.69465", "radius": 1000, "units": "km"}
q = Query("@g:[$lon $lat $radius $units]").dialect(2)
res = client.ft().search(q, query_params=params_dict)
- if is_resp2_connection(client):
- assert 3 == res.total
- assert "doc1" == res.docs[0].id
- assert "doc2" == res.docs[1].id
- assert "doc3" == res.docs[2].id
- else:
- assert 3 == res["total_results"]
- assert "doc1" == res["results"][0]["id"]
- assert "doc2" == res["results"][1]["id"]
- assert "doc3" == res["results"][2]["id"]
+ _assert_search_result(client, res, ["doc1", "doc2", "doc3"])
+
+
+@pytest.mark.redismod
+def test_geoshapes_query_intersects_and_disjoint(client):
+ client.ft().create_index((GeoShapeField("g", coord_system=GeoShapeField.FLAT)))
+ client.hset("doc_point1", mapping={"g": "POINT (10 10)"})
+ client.hset("doc_point2", mapping={"g": "POINT (50 50)"})
+ client.hset("doc_polygon1", mapping={"g": "POLYGON ((20 20, 25 35, 35 25, 20 20))"})
+ client.hset(
+ "doc_polygon2", mapping={"g": "POLYGON ((60 60, 65 75, 70 70, 65 55, 60 60))"}
+ )
+
+ intersection = client.ft().search(
+ Query("@g:[intersects $shape]").dialect(3),
+ query_params={"shape": "POLYGON((15 15, 75 15, 50 70, 20 40, 15 15))"},
+ )
+ _assert_search_result(client, intersection, ["doc_point2", "doc_polygon1"])
+
+ disjunction = client.ft().search(
+ Query("@g:[disjoint $shape]").dialect(3),
+ query_params={"shape": "POLYGON((15 15, 75 15, 50 70, 20 40, 15 15))"},
+ )
+ _assert_search_result(client, disjunction, ["doc_point1", "doc_polygon2"])
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("2.10.0", "search")
+def test_geoshapes_query_contains_and_within(client):
+ client.ft().create_index((GeoShapeField("g", coord_system=GeoShapeField.FLAT)))
+ client.hset("doc_point1", mapping={"g": "POINT (10 10)"})
+ client.hset("doc_point2", mapping={"g": "POINT (50 50)"})
+ client.hset("doc_polygon1", mapping={"g": "POLYGON ((20 20, 25 35, 35 25, 20 20))"})
+ client.hset(
+ "doc_polygon2", mapping={"g": "POLYGON ((60 60, 65 75, 70 70, 65 55, 60 60))"}
+ )
+
+ contains_a = client.ft().search(
+ Query("@g:[contains $shape]").dialect(3),
+ query_params={"shape": "POINT(25 25)"},
+ )
+ _assert_search_result(client, contains_a, ["doc_polygon1"])
+
+ contains_b = client.ft().search(
+ Query("@g:[contains $shape]").dialect(3),
+ query_params={"shape": "POLYGON((24 24, 24 26, 25 25, 24 24))"},
+ )
+ _assert_search_result(client, contains_b, ["doc_polygon1"])
+
+ within = client.ft().search(
+ Query("@g:[within $shape]").dialect(3),
+ query_params={"shape": "POLYGON((15 15, 75 15, 50 70, 20 40, 15 15))"},
+ )
+ _assert_search_result(client, within, ["doc_point2", "doc_polygon1"])
+
+
@pytest.mark.redismod
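The geoshape tests above pass geometries as WKT literals and require DIALECT 3. The four operators form two complementary pairs: intersects/disjoint split documents by whether the stored shape shares any point with $shape, while within and contains are converses of each other (stored shape inside the query shape versus query shape inside the stored one). A hedged usage sketch; the index name and polygons are illustrative and a local Redis Stack is assumed:

import redis
from redis.commands.search.field import GeoShapeField
from redis.commands.search.query import Query

r = redis.Redis(decode_responses=True)
r.ft("geo_idx").create_index((GeoShapeField("g", coord_system=GeoShapeField.FLAT),))
r.hset("poly:1", mapping={"g": "POLYGON ((20 20, 25 35, 35 25, 20 20))"})

# WITHIN: the stored shape lies inside $shape; CONTAINS is the converse.
res = r.ft("geo_idx").search(
    Query("@g:[within $shape]").dialect(3),
    query_params={"shape": "POLYGON((0 0, 100 0, 100 100, 0 100, 0 0))"},
)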
@@ -2280,7 +2374,213 @@ def test_geoshape(client: redis.Redis):
q2 = Query("@geom:[CONTAINS $poly]").dialect(3)
qp2 = {"poly": "POLYGON((2 2, 2 50, 50 50, 50 2, 2 2))"}
result = client.ft().search(q1, query_params=qp1)
- assert len(result.docs) == 1
- assert result.docs[0]["id"] == "small"
+ _assert_search_result(client, result, ["small"])
result = client.ft().search(q2, query_params=qp2)
- assert len(result.docs) == 2
+ _assert_search_result(client, result, ["small", "large"])
+
+
+@pytest.mark.redismod
+def test_search_missing_fields(client):
+ definition = IndexDefinition(prefix=["property:"], index_type=IndexType.HASH)
+
+ fields = [
+ TextField("title", sortable=True),
+ TagField("features", index_missing=True),
+ TextField("description", index_missing=True),
+ ]
+
+ client.ft().create_index(fields, definition=definition)
+
+ # All fields present
+ client.hset(
+ "property:1",
+ mapping={
+ "title": "Luxury Villa in Malibu",
+ "features": "pool,sea view,modern",
+ "description": "A stunning modern villa overlooking the Pacific Ocean.",
+ },
+ )
+
+ # Missing features
+ client.hset(
+ "property:2",
+ mapping={
+ "title": "Downtown Flat",
+ "description": "Modern flat in central Paris with easy access to metro.",
+ },
+ )
+
+ # Missing description
+ client.hset(
+ "property:3",
+ mapping={
+ "title": "Beachfront Bungalow",
+ "features": "beachfront,sun deck",
+ },
+ )
+
+ with pytest.raises(redis.exceptions.ResponseError) as e:
+ client.ft().search(
+ Query("ismissing(@title)").dialect(2).return_field("id").no_content()
+ )
+ assert "to be defined with 'INDEXMISSING'" in e.value.args[0]
+
+ res = client.ft().search(
+ Query("ismissing(@features)").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:2"])
+
+ res = client.ft().search(
+ Query("-ismissing(@features)").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:1", "property:3"])
+
+ res = client.ft().search(
+ Query("ismissing(@description)").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:3"])
+
+ res = client.ft().search(
+ Query("-ismissing(@description)").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:1", "property:2"])
+
+
+@pytest.mark.redismod
+def test_search_empty_fields(client):
+ definition = IndexDefinition(prefix=["property:"], index_type=IndexType.HASH)
+
+ fields = [
+ TextField("title", sortable=True),
+ TagField("features", index_empty=True),
+ TextField("description", index_empty=True),
+ ]
+
+ client.ft().create_index(fields, definition=definition)
+
+ # All fields present
+ client.hset(
+ "property:1",
+ mapping={
+ "title": "Luxury Villa in Malibu",
+ "features": "pool,sea view,modern",
+ "description": "A stunning modern villa overlooking the Pacific Ocean.",
+ },
+ )
+
+ # Empty features
+ client.hset(
+ "property:2",
+ mapping={
+ "title": "Downtown Flat",
+ "features": "",
+ "description": "Modern flat in central Paris with easy access to metro.",
+ },
+ )
+
+ # Empty description
+ client.hset(
+ "property:3",
+ mapping={
+ "title": "Beachfront Bungalow",
+ "features": "beachfront,sun deck",
+ "description": "",
+ },
+ )
+
+ with pytest.raises(redis.exceptions.ResponseError) as e:
+ client.ft().search(
+ Query("@title:''").dialect(2).return_field("id").no_content()
+ )
+ assert "Use `INDEXEMPTY` in field creation" in e.value.args[0]
+
+ res = client.ft().search(
+ Query("@features:{$empty}").dialect(2).return_field("id").no_content(),
+ query_params={"empty": ""},
+ )
+ _assert_search_result(client, res, ["property:2"])
+
+ res = client.ft().search(
+ Query("-@features:{$empty}").dialect(2).return_field("id").no_content(),
+ query_params={"empty": ""},
+ )
+ _assert_search_result(client, res, ["property:1", "property:3"])
+
+ res = client.ft().search(
+ Query("@description:''").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:3"])
+
+ res = client.ft().search(
+ Query("-@description:''").dialect(2).return_field("id").no_content()
+ )
+ _assert_search_result(client, res, ["property:1", "property:2"])
+
+
+@pytest.mark.redismod
+def test_special_characters_in_fields(client):
+ definition = IndexDefinition(prefix=["resource:"], index_type=IndexType.HASH)
+
+ fields = [
+ TagField("uuid"),
+ TagField("tags", separator="|"),
+ TextField("description"),
+ NumericField("rating"),
+ ]
+
+ client.ft().create_index(fields, definition=definition)
+
+ client.hset(
+ "resource:1",
+ mapping={
+ "uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "tags": "finance|crypto|$btc|blockchain",
+ "description": "Analysis of blockchain technologies & Bitcoin's potential.",
+ "rating": 5,
+ },
+ )
+
+ client.hset(
+ "resource:2",
+ mapping={
+ "uuid": "987e6543-e21c-12d3-a456-426614174999",
+ "tags": "health|well-being|fitness|new-year's-resolutions",
+ "description": "Health trends for the new year, including fitness regimes.",
+ "rating": 4,
+ },
+ )
+
+ # no need to escape "-" when using params
+ res = client.ft().search(
+ Query("@uuid:{$uuid}").dialect(2),
+ query_params={"uuid": "123e4567-e89b-12d3-a456-426614174000"},
+ )
+ _assert_search_result(client, res, ["resource:1"])
+
+ # with a double-quoted exact match there is no need to escape "-" even without params
+ res = client.ft().search(
+ Query('@uuid:{"123e4567-e89b-12d3-a456-426614174000"}').dialect(2)
+ )
+ _assert_search_result(client, res, ["resource:1"])
+
+ res = client.ft().search(Query('@tags:{"new-year\'s-resolutions"}').dialect(2))
+ _assert_search_result(client, res, ["resource:2"])
+
+ # possible to search numeric fields by single value
+ res = client.ft().search(Query("@rating:[4]").dialect(2))
+ _assert_search_result(client, res, ["resource:2"])
+
+ # some chars still need escaping
+ res = client.ft().search(Query(r"@tags:{\$btc}").dialect(2))
+ _assert_search_result(client, res, ["resource:1"])
+
+
+def _assert_search_result(client, result, expected_doc_ids):
+ """
+ Make sure the result of a search is as expected, taking into account the RESP
+ version being used.
+ """
+ if is_resp2_connection(client):
+ assert set([doc.id for doc in result.docs]) == set(expected_doc_ids)
+ else:
+ assert set([doc["id"] for doc in result["results"]]) == set(expected_doc_ids)
diff --git a/tests/test_ssl.py b/tests/test_ssl.py
index 0e91750aa5..fc7416dbc7 100644
--- a/tests/test_ssl.py
+++ b/tests/test_ssl.py
@@ -18,21 +18,25 @@ class TestSSL:
and connecting to the appropriate port.
"""
+ CA_CERT = get_ssl_filename("ca-cert.pem")
+ CLIENT_CERT = get_ssl_filename("client-cert.pem")
+ CLIENT_KEY = get_ssl_filename("client-key.pem")
SERVER_CERT = get_ssl_filename("server-cert.pem")
- SERVER_KEY = get_ssl_filename("server-key.pem")
def test_ssl_with_invalid_cert(self, request):
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
- assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
+ assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
+ sslclient.close()
def test_ssl_connection(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none")
assert r.ping()
+ r.close()
def test_ssl_connection_without_ssl(self, request):
ssl_url = request.config.option.redis_ssl_url
@@ -41,7 +45,8 @@ def test_ssl_connection_without_ssl(self, request):
with pytest.raises(ConnectionError) as e:
r.ping()
- assert "Connection closed by server" in str(e)
+ assert "Connection closed by server" in str(e)
+ r.close()
def test_validating_self_signed_certificate(self, request):
ssl_url = request.config.option.redis_ssl_url
@@ -50,15 +55,16 @@ def test_validating_self_signed_certificate(self, request):
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
)
assert r.ping()
+ r.close()
def test_validating_self_signed_string_certificate(self, request):
- with open(self.SERVER_CERT) as f:
+ with open(self.CA_CERT) as f:
cert_data = f.read()
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
@@ -66,12 +72,13 @@ def test_validating_self_signed_string_certificate(self, request):
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
ssl_ca_data=cert_data,
)
assert r.ping()
+ r.close()
@pytest.mark.parametrize(
"ssl_ciphers",
@@ -142,10 +149,10 @@ def _create_oscp_conn(self, request):
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
ssl_validate_ocsp=True,
)
return r
@@ -154,22 +161,17 @@ def _create_oscp_conn(self, request):
def test_ssl_ocsp_called(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(RedisError) as e:
- assert r.ping()
- assert "cryptography not installed" in str(e)
+ r.ping()
+ assert "cryptography is not installed" in str(e)
+ r.close()
@skip_if_nocryptography()
def test_ssl_ocsp_called_withcrypto(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(ConnectionError) as e:
assert r.ping()
- assert "No AIA information present in ssl certificate" in str(e)
-
- # rediss://, url based
- ssl_url = request.config.option.redis_ssl_url
- sslclient = redis.from_url(ssl_url)
- with pytest.raises(ConnectionError) as e:
- sslclient.ping()
- assert "No AIA information present in ssl certificate" in str(e)
+ assert "No AIA information present in ssl certificate" in str(e)
+ r.close()
@skip_if_nocryptography()
def test_valid_ocsp_cert_http(self):
@@ -194,7 +196,7 @@ def test_revoked_ocsp_certificate(self):
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError) as e:
assert ocsp.is_valid()
- assert "REVOKED" in str(e)
+ assert "REVOKED" in str(e)
@skip_if_nocryptography()
def test_unauthorized_ocsp(self):
@@ -219,7 +221,7 @@ def test_ocsp_not_present_in_response(self):
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError) as e:
assert ocsp.is_valid()
- assert "from the" in str(e)
+ assert "from the" in str(e)
@skip_if_nocryptography()
def test_unauthorized_then_direct(self):
@@ -245,49 +247,52 @@ def test_mock_ocsp_staple(self, request):
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
ssl_validate_ocsp=True,
ssl_ocsp_context=p, # just needs to not be none
)
with pytest.raises(RedisError):
r.ping()
+ r.close()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
- ctx.use_certificate_file(self.SERVER_CERT)
- ctx.use_privatekey_file(self.SERVER_KEY)
+ ctx.use_certificate_file(self.CLIENT_CERT)
+ ctx.use_privatekey_file(self.CLIENT_KEY)
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
ssl_ocsp_context=ctx,
- ssl_ocsp_expected_cert=open(self.SERVER_KEY, "rb").read(),
+ ssl_ocsp_expected_cert=open(self.SERVER_CERT, "rb").read(),
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
- assert "no ocsp response present" in str(e)
+ assert "no ocsp response present" in str(e)
+ r.close()
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
- ssl_certfile=self.SERVER_CERT,
- ssl_keyfile=self.SERVER_KEY,
+ ssl_certfile=self.CLIENT_CERT,
+ ssl_keyfile=self.CLIENT_KEY,
ssl_cert_reqs="required",
- ssl_ca_certs=self.SERVER_CERT,
+ ssl_ca_certs=self.CA_CERT,
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
- assert "no ocsp response present" in str(e)
+ assert "no ocsp response present" in str(e)
+ r.close()
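The certificate renaming above is the substantive fix in this file: the old tests presented the server's own cert and key as the client identity and reused the server cert as the trust root, which only worked because everything was self-signed. The corrected setup separates the three roles. A sketch of a mutual-TLS client configured that way; host, port, and file paths are placeholders:

import redis

r = redis.Redis(
    host="localhost",
    port=6379,
    ssl=True,
    ssl_certfile="client-cert.pem",  # the identity the client presents
    ssl_keyfile="client-key.pem",    # private key proving that identity
    ssl_ca_certs="ca-cert.pem",      # CA that signed the *server's* certificate
    ssl_cert_reqs="required",        # verify the server against that CA
)
assert r.ping()
r.close()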
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 60472e0194..90e627ef6e 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -5,7 +5,20 @@
import pytest
import redis
-from .conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt
+from .conftest import (
+ _get_client,
+ assert_resp_response,
+ is_resp2_connection,
+ skip_ifmodversion_lt,
+)
+
+
+@pytest.fixture()
+def decoded_r(request, stack_url):
+ with _get_client(
+ redis.Redis, request, decode_responses=True, from_url=stack_url
+ ) as client:
+ yield client
@pytest.fixture
@@ -14,6 +27,7 @@ def client(decoded_r):
return decoded_r
+@pytest.mark.redismod
def test_create(client):
assert client.ts().create(1)
assert client.ts().create(2, retention_msecs=5)
@@ -31,6 +45,7 @@ def test_create(client):
assert_resp_response(client, 128, info.get("chunk_size"), info.get("chunkSize"))
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
def test_create_duplicate_policy(client):
# Test for duplicate policy
@@ -46,6 +61,7 @@ def test_create_duplicate_policy(client):
)
+@pytest.mark.redismod
def test_alter(client):
assert client.ts().create(1)
info = client.ts().info(1)
@@ -66,8 +82,9 @@ def test_alter(client):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-def test_alter_diplicate_policy(client):
+def test_alter_duplicate_policy(client):
assert client.ts().create(1)
info = client.ts().info(1)
assert_resp_response(
@@ -80,6 +97,7 @@ def test_alter_diplicate_policy(client):
)
+@pytest.mark.redismod
def test_add(client):
assert 1 == client.ts().add(1, 1, 1)
assert 2 == client.ts().add(2, 2, 3, retention_msecs=10)
@@ -102,47 +120,53 @@ def test_add(client):
assert_resp_response(client, 128, info.get("chunk_size"), info.get("chunkSize"))
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-def test_add_duplicate_policy(client):
+def test_add_on_duplicate(client):
# Test for duplicate policy BLOCK
assert 1 == client.ts().add("time-serie-add-ooo-block", 1, 5.0)
with pytest.raises(Exception):
- client.ts().add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block")
+ client.ts().add("time-serie-add-ooo-block", 1, 5.0, on_duplicate="block")
# Test for duplicate policy LAST
assert 1 == client.ts().add("time-serie-add-ooo-last", 1, 5.0)
- assert 1 == client.ts().add(
- "time-serie-add-ooo-last", 1, 10.0, duplicate_policy="last"
- )
+ assert 1 == client.ts().add("time-serie-add-ooo-last", 1, 10.0, on_duplicate="last")
assert 10.0 == client.ts().get("time-serie-add-ooo-last")[1]
# Test for duplicate policy FIRST
assert 1 == client.ts().add("time-serie-add-ooo-first", 1, 5.0)
assert 1 == client.ts().add(
- "time-serie-add-ooo-first", 1, 10.0, duplicate_policy="first"
+ "time-serie-add-ooo-first", 1, 10.0, on_duplicate="first"
)
assert 5.0 == client.ts().get("time-serie-add-ooo-first")[1]
# Test for duplicate policy MAX
assert 1 == client.ts().add("time-serie-add-ooo-max", 1, 5.0)
- assert 1 == client.ts().add(
- "time-serie-add-ooo-max", 1, 10.0, duplicate_policy="max"
- )
+ assert 1 == client.ts().add("time-serie-add-ooo-max", 1, 10.0, on_duplicate="max")
assert 10.0 == client.ts().get("time-serie-add-ooo-max")[1]
# Test for duplicate policy MIN
assert 1 == client.ts().add("time-serie-add-ooo-min", 1, 5.0)
- assert 1 == client.ts().add(
- "time-serie-add-ooo-min", 1, 10.0, duplicate_policy="min"
- )
+ assert 1 == client.ts().add("time-serie-add-ooo-min", 1, 10.0, on_duplicate="min")
assert 5.0 == client.ts().get("time-serie-add-ooo-min")[1]
+@pytest.mark.redismod
def test_madd(client):
client.ts().create("a")
assert [1, 2, 3] == client.ts().madd([("a", 1, 5), ("a", 2, 10), ("a", 3, 15)])
+@pytest.mark.redismod
+def test_madd_missing_timeseries(client):
+ response = client.ts().madd([("a", 1, 5), ("a", 2, 10)])
+ assert isinstance(response, list)
+ assert len(response) == 2
+ assert isinstance(response[0], redis.ResponseError)
+ assert isinstance(response[1], redis.ResponseError)
+
+
+@pytest.mark.redismod
def test_incrby_decrby(client):
for _ in range(100):
assert client.ts().incrby(1, 1)
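The duplicate_policy= to on_duplicate= rename in the TS.ADD calls above tracks the server's two-level model: DUPLICATE_POLICY set at create/alter time is the series-wide default, and ON_DUPLICATE on an individual add overrides it for that one sample only. A short usage sketch, assuming a local Redis Stack; the key name is illustrative:

import redis

ts = redis.Redis(decode_responses=True).ts()
ts.create("temp:1", duplicate_policy="block")  # series default: reject collisions
ts.add("temp:1", 1, 5.0)
ts.add("temp:1", 1, 10.0, on_duplicate="max")  # per-call override: keep the larger value
assert ts.get("temp:1") == (1, 10.0)           # (RESP2 reply shape)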
@@ -171,7 +195,7 @@ def test_incrby_decrby(client):
assert_resp_response(client, 128, info.get("chunk_size"), info.get("chunkSize"))
-@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_create_and_delete_rule(client):
# test rule creation
time = 100
@@ -195,12 +219,13 @@ def test_create_and_delete_rule(client):
assert not info["rules"]
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
def test_del_range(client):
try:
client.ts().delete("test", 0, 100)
- except Exception as e:
- assert e.__str__() != ""
+ except redis.ResponseError as e:
+ assert "key does not exist" in str(e)
for i in range(100):
client.ts().add(1, i, i % 7)
@@ -209,6 +234,7 @@ def test_del_range(client):
assert_resp_response(client, client.ts().range(1, 22, 22), [(22, 1.0)], [[22, 1.0]])
+@pytest.mark.redismod
def test_range(client):
for i in range(100):
client.ts().add(1, i, i % 7)
@@ -223,7 +249,8 @@ def test_range(client):
assert 10 == len(client.ts().range(1, 0, 500, count=10))
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
def test_range_advanced(client):
for i in range(100):
client.ts().add(1, i, i % 7)
@@ -252,6 +279,7 @@ def test_range_advanced(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_range_latest(client: redis.Redis):
timeseries = client.ts()
@@ -276,6 +304,7 @@ def test_range_latest(client: redis.Redis):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_range_bucket_timestamp(client: redis.Redis):
timeseries = client.ts()
@@ -309,6 +338,7 @@ def test_range_bucket_timestamp(client: redis.Redis):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_range_empty(client: redis.Redis):
timeseries = client.ts()
@@ -353,7 +383,8 @@ def test_range_empty(client: redis.Redis):
assert_resp_response(client, res, resp2_expected, resp3_expected)
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
def test_rev_range(client):
for i in range(100):
client.ts().add(1, i, i % 7)
@@ -401,6 +432,7 @@ def test_rev_range(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_revrange_latest(client: redis.Redis):
timeseries = client.ts()
@@ -419,6 +451,7 @@ def test_revrange_latest(client: redis.Redis):
assert_resp_response(client, res, [(0, 4.0)], [[0, 4.0]])
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_revrange_bucket_timestamp(client: redis.Redis):
timeseries = client.ts()
@@ -452,6 +485,7 @@ def test_revrange_bucket_timestamp(client: redis.Redis):
)
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_revrange_empty(client: redis.Redis):
timeseries = client.ts()
@@ -497,6 +531,7 @@ def test_revrange_empty(client: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_mrange(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
@@ -545,7 +580,8 @@ def test_mrange(client):
@pytest.mark.onlynoncluster
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
def test_multi_range_advanced(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
@@ -658,6 +694,7 @@ def test_multi_range_advanced(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_mrange_latest(client: redis.Redis):
timeseries = client.ts()
@@ -687,7 +724,8 @@ def test_mrange_latest(client: redis.Redis):
@pytest.mark.onlynoncluster
-@skip_ifmodversion_lt("99.99.99", "timeseries")
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.10.0", "timeseries")
def test_multi_reverse_range(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
@@ -805,6 +843,7 @@ def test_multi_reverse_range(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_mrevrange_latest(client: redis.Redis):
timeseries = client.ts()
@@ -833,6 +872,7 @@ def test_mrevrange_latest(client: redis.Redis):
)
+@pytest.mark.redismod
def test_get(client):
name = "test"
client.ts().create(name)
@@ -844,6 +884,7 @@ def test_get(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_get_latest(client: redis.Redis):
timeseries = client.ts()
@@ -861,6 +902,7 @@ def test_get_latest(client: redis.Redis):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
def test_mget(client):
client.ts().create(1, labels={"Test": "This"})
client.ts().create(2, labels={"Test": "This", "Taste": "That"})
@@ -896,6 +938,7 @@ def test_mget(client):
@pytest.mark.onlynoncluster
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.8.0", "timeseries")
def test_mget_latest(client: redis.Redis):
timeseries = client.ts()
@@ -912,6 +955,7 @@ def test_mget_latest(client: redis.Redis):
assert_resp_response(client, res, [{"t2": [{}, 10, 8.0]}], {"t2": [{}, [10, 8.0]]})
+@pytest.mark.redismod
def test_info(client):
client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
info = client.ts().info(1)
@@ -921,8 +965,9 @@ def test_info(client):
assert info["labels"]["currentLabel"] == "currentData"
+@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
-def testInfoDuplicatePolicy(client):
+def test_info_duplicate_policy(client):
client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
info = client.ts().info(1)
assert_resp_response(
@@ -936,6 +981,7 @@ def testInfoDuplicatePolicy(client):
)
+@pytest.mark.redismod
@pytest.mark.onlynoncluster
def test_query_index(client):
client.ts().create(1, labels={"Test": "This"})
@@ -945,6 +991,7 @@ def test_query_index(client):
assert_resp_response(client, client.ts().queryindex(["Taste=That"]), [2], {"2"})
+@pytest.mark.redismod
def test_pipeline(client):
pipeline = client.ts().pipeline()
pipeline.create("with_pipeline")
@@ -962,12 +1009,179 @@ def test_pipeline(client):
assert client.ts().get("with_pipeline")[1] == 99 * 1.1
+@pytest.mark.redismod
def test_uncompressed(client):
client.ts().create("compressed")
client.ts().create("uncompressed", uncompressed=True)
+ for i in range(1000):
+ client.ts().add("compressed", i, i)
+ client.ts().add("uncompressed", i, i)
compressed_info = client.ts().info("compressed")
uncompressed_info = client.ts().info("uncompressed")
if is_resp2_connection(client):
- assert compressed_info.memory_usage != uncompressed_info.memory_usage
+ assert compressed_info.memory_usage < uncompressed_info.memory_usage
else:
- assert compressed_info["memoryUsage"] != uncompressed_info["memoryUsage"]
+ assert compressed_info["memoryUsage"] < uncompressed_info["memoryUsage"]
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_create_with_insertion_filters(client):
+ client.ts().create(
+ "time-series-1",
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+ assert 1000 == client.ts().add("time-series-1", 1000, 1.0)
+ assert 1010 == client.ts().add("time-series-1", 1010, 11.0)
+ assert 1010 == client.ts().add("time-series-1", 1013, 10.0)
+ assert 1020 == client.ts().add("time-series-1", 1020, 11.5)
+ assert 1021 == client.ts().add("time-series-1", 1021, 22.0)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ client,
+ data_points,
+ [(1000, 1.0), (1010, 11.0), (1020, 11.5), (1021, 22.0)],
+ [[1000, 1.0], [1010, 11.0], [1020, 11.5], [1021, 22.0]],
+ )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_create_with_insertion_filters_other_duplicate_policy(client):
+ client.ts().create(
+ "time-series-1",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+ assert 1000 == client.ts().add("time-series-1", 1000, 1.0)
+ assert 1010 == client.ts().add("time-series-1", 1010, 11.0)
+ # Still accepted because the duplicate_policy is not `last`.
+ assert 1013 == client.ts().add("time-series-1", 1013, 10.0)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ client,
+ data_points,
+ [(1000, 1.0), (1010, 11.0), (1013, 10)],
+ [[1000, 1.0], [1010, 11.0], [1013, 10]],
+ )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_alter_with_insertion_filters(client):
+ assert 1000 == client.ts().add("time-series-1", 1000, 1.0)
+ assert 1010 == client.ts().add("time-series-1", 1010, 11.0)
+ assert 1013 == client.ts().add("time-series-1", 1013, 10.0)
+
+ client.ts().alter(
+ "time-series-1",
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
+ assert 1013 == client.ts().add("time-series-1", 1015, 11.5)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ client,
+ data_points,
+ [(1000, 1.0), (1010, 11.0), (1013, 10.0)],
+ [[1000, 1.0], [1010, 11.0], [1013, 10.0]],
+ )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_add_with_insertion_filters(client):
+ assert 1000 == client.ts().add(
+ "time-series-1",
+ 1000,
+ 1.0,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
+ assert 1000 == client.ts().add("time-series-1", 1004, 3.0)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(client, data_points, [(1000, 1.0)], [[1000, 1.0]])
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_incrby_with_insertion_filters(client):
+ assert 1000 == client.ts().incrby(
+ "time-series-1",
+ 1.0,
+ timestamp=1000,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
+ assert 1000 == client.ts().incrby("time-series-1", 3.0, timestamp=1000)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(client, data_points, [(1000, 1.0)], [[1000, 1.0]])
+
+ assert 1000 == client.ts().incrby("time-series-1", 10.1, timestamp=1000)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(client, data_points, [(1000, 11.1)], [[1000, 11.1]])
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_decrby_with_insertion_filters(client):
+ assert 1000 == client.ts().decrby(
+ "time-series-1",
+ 1.0,
+ timestamp=1000,
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+
+ assert 1000 == client.ts().decrby("time-series-1", 3.0, timestamp=1000)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(client, data_points, [(1000, -1.0)], [[1000, -1.0]])
+
+ assert 1000 == client.ts().decrby("time-series-1", 10.1, timestamp=1000)
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(client, data_points, [(1000, -11.1)], [[1000, -11.1]])
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.12.0", "timeseries")
+def test_madd_with_insertion_filters(client):
+ client.ts().create(
+ "time-series-1",
+ duplicate_policy="last",
+ ignore_max_time_diff=5,
+ ignore_max_val_diff=10.0,
+ )
+ assert 1010 == client.ts().add("time-series-1", 1010, 1.0)
+ assert [1010, 1010, 1020, 1021] == client.ts().madd(
+ [
+ ("time-series-1", 1011, 11.0),
+ ("time-series-1", 1013, 10.0),
+ ("time-series-1", 1020, 2.0),
+ ("time-series-1", 1021, 22.0),
+ ]
+ )
+
+ data_points = client.ts().range("time-series-1", "-", "+")
+ assert_resp_response(
+ client,
+ data_points,
+ [(1010, 1.0), (1020, 2.0), (1021, 22.0)],
+ [[1010, 1.0], [1020, 2.0], [1021, 22.0]],
+ )