@@ -536,3 +536,79 @@ def run_model_execute_from_llapi():
536
536
info = info_to_dict (con .execute_command ('AI.INFO' , 'm{1}' ))
537
537
env .assertGreaterEqual (info ['calls' ], 0 )
538
538
env .assertGreaterEqual (num_parallel_clients , info ['calls' ])
539
+
540
+
541
def test_ai_config(env):
    """Exercise AI.CONFIG end-to-end on every master shard.

    Covers: reading the default BACKENDSPATH and MODEL_CHUNK_SIZE, changing
    BACKENDSPATH and loading the TORCH backend from it, storing a model both
    as a single blob and as explicit chunks, and verifying that lowering
    MODEL_CHUNK_SIZE makes AI.MODELGET return the blob split into chunks.
    Requires the PyTorch backend (TEST_PT).
    """
    if not TEST_PT:
        env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
        return

    shard_conns = env.getOSSMasterNodesConnectionList()
    if env.isCluster():
        # One connection per shard is expected in cluster mode.
        env.assertEqual(len(shard_conns), env.shardsCount)

    model = load_file_content('pt-minimal.pt')

    for shard in shard_conns:
        # Defaults: no backends path is set, chunk size is 511MB.
        reply = shard.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
        env.assertEqual(reply, None)
        reply = shard.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
        env.assertEqual(reply, 511 * 1024 * 1024)

        # Point BACKENDSPATH at the install dir, then load the TORCH backend.
        backends_path = f'{ROOT}/install-{DEVICE.lower()}'
        shard.execute_command('AI.CONFIG', 'BACKENDSPATH', backends_path)
        reply = shard.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
        env.assertEqual(reply, backends_path.encode())
        backends = get_info_section(shard, "backends_info")
        env.assertEqual(len(backends), 0)  # no backends are loaded.
        # A bogus relative path must fail to load.
        check_error_message(env, shard, 'error loading backend', 'AI.CONFIG', 'LOADBACKEND', 'TORCH', ".")

        reply = shard.execute_command('AI.CONFIG', 'LOADBACKEND', 'TORCH', "backends/redisai_torch/redisai_torch.so")
        env.assertEqual(reply, b'OK')
        backends = get_info_section(shard, "backends_info")
        env.assertEqual(len(backends), 1)  # one backend is loaded now - torch.

    # Set the same model twice on some shard - with and without chunks, and assert equality.
    con = get_connection(env, '{1}')
    chunk_size = len(model) // 3
    model_chunks = [model[offset:offset + chunk_size] for offset in range(0, len(model), chunk_size)]
    con.execute_command('AI.MODELSTORE', 'm1{1}', 'TORCH', DEVICE, 'BLOB', model)
    con.execute_command('AI.MODELSTORE', 'm2{1}', 'TORCH', DEVICE, 'BLOB', *model_chunks)
    blob_whole = con.execute_command('AI.MODELGET', 'm1{1}', 'BLOB')
    blob_chunked = con.execute_command('AI.MODELGET', 'm2{1}', 'BLOB')
    env.assertEqual(blob_whole, blob_chunked)

    for shard in shard_conns:
        # Lower the chunk size on every shard and read it back.
        reply = shard.execute_command('AI.CONFIG', 'MODEL_CHUNK_SIZE', chunk_size)
        env.assertEqual(reply, b'OK')
        reply = shard.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
        env.assertEqual(reply, chunk_size)

    # Verify that AI.MODELGET returns the model's blob in chunks, with or without the META arg.
    con = get_connection(env, '{1}')
    chunks_reply = con.execute_command('AI.MODELGET', 'm1{1}', 'BLOB')
    env.assertEqual(len(chunks_reply), len(model_chunks))
    env.assertTrue(all(got == expected for got, expected in zip(chunks_reply, model_chunks)))

    chunks_reply = con.execute_command('AI.MODELGET', 'm1{1}', 'META', 'BLOB')[-1]  # Extract the BLOB list from the result
    env.assertEqual(len(chunks_reply), len(model_chunks))
    env.assertTrue(all(got == expected for got, expected in zip(chunks_reply, model_chunks)))
599
+
600
+
601
def test_ai_config_errors(env):
    """Verify AI.CONFIG rejects malformed invocations with the expected errors.

    Runs through arity errors, an unknown subcommand, an unknown backend,
    missing arguments for BACKENDSPATH / MODEL_CHUNK_SIZE, and GET on an
    unknown config key (which replies nil rather than erroring).
    """
    con = get_connection(env, '{1}')

    # (expected error message, AI.CONFIG arguments) pairs, checked in order.
    error_cases = [
        ("wrong number of arguments for 'AI.CONFIG' command", ('AI.CONFIG',)),
        ('unsupported subcommand', ('AI.CONFIG', "bad_subcommand")),
        ("wrong number of arguments for 'AI.CONFIG' command", ('AI.CONFIG', 'LOADBACKEND')),
        ('unsupported backend', ('AI.CONFIG', 'LOADBACKEND', 'bad_backend', "backends/redisai_torch/redisai_torch.so")),
        ("wrong number of arguments for 'AI.CONFIG' command", ('AI.CONFIG', 'LOADBACKEND', "TORCH")),
        ('BACKENDSPATH: missing path argument', ('AI.CONFIG', 'BACKENDSPATH')),
        ('MODEL_CHUNK_SIZE: missing chunk size', ('AI.CONFIG', 'MODEL_CHUNK_SIZE')),
        ("wrong number of arguments for 'AI.CONFIG' command", ('AI.CONFIG', 'GET')),
    ]
    for expected_message, command_args in error_cases:
        check_error_message(env, con, expected_message, *command_args)

    # GET on an unknown key is not an error - it replies nil.
    env.assertEqual(con.execute_command('AI.CONFIG', 'GET', 'bad_config'), None)
0 commit comments