@@ -569,13 +569,14 @@ def run_stage(self, vm, stage, server_command, out, err, cwd, nonZeroIsFatal):
569569 mx .abort ("The server application unexpectedly ended with return code " + str (returnCode ))
570570
571571 if self .measureLatency :
572- # Calibrate for latency measurements (without RSS tracker)
573- with EmptyEnv (self .get_env ()):
574- measurementThread = self .startDaemonThread (BaseMicroserviceBenchmarkSuite .calibrateLatencyTestInBackground , [self ])
575- returnCode = mx .run (serverCommandWithoutTracker , out = out , err = err , cwd = cwd , nonZeroIsFatal = nonZeroIsFatal )
576- measurementThread .join ()
577- if not self .validateReturnCode (returnCode ):
578- mx .abort ("The server application unexpectedly ended with return code " + str (returnCode ))
572+ if not any ([c .get ("requests-per-second" ) for c in self .loadConfiguration ("latency" )]):
573+ # Calibrate for latency measurements (without RSS tracker) if no fixed request rate has been provided in the config
574+ with EmptyEnv (self .get_env ()):
575+ measurementThread = self .startDaemonThread (BaseMicroserviceBenchmarkSuite .calibrateLatencyTestInBackground , [self ])
576+ returnCode = mx .run (serverCommandWithoutTracker , out = out , err = err , cwd = cwd , nonZeroIsFatal = nonZeroIsFatal )
577+ measurementThread .join ()
578+ if not self .validateReturnCode (returnCode ):
579+ mx .abort ("The server application unexpectedly ended with return code " + str (returnCode ))
579580
580581 # Measure latency (without RSS tracker)
581582 with EmptyEnv (self .get_env ()):
@@ -696,12 +697,13 @@ def run(self, benchmarks, bmSuiteArgs):
696697 measurementThread .join ()
697698
698699 if self .measureLatency :
699- # Calibrate for latency measurements (without RSS tracker)
700- mx_benchmark .disable_tracker ()
701- with EmptyEnv (self .get_env ()):
702- measurementThread = self .startDaemonThread (BaseMicroserviceBenchmarkSuite .calibrateLatencyTestInBackground , [self ])
703- datapoints += super (BaseMicroserviceBenchmarkSuite , self ).run (benchmarks , remainder )
704- measurementThread .join ()
700+ if not [c .get ("requests-per-second" ) for c in self .loadConfiguration ("latency" ) if c .get ("requests-per-second" )]:
701+ # Calibrate for latency measurements (without RSS tracker) if no fixed request rate has been provided in the config
702+ mx_benchmark .disable_tracker ()
703+ with EmptyEnv (self .get_env ()):
704+ measurementThread = self .startDaemonThread (BaseMicroserviceBenchmarkSuite .calibrateLatencyTestInBackground , [self ])
705+ datapoints += super (BaseMicroserviceBenchmarkSuite , self ).run (benchmarks , remainder )
706+ measurementThread .join ()
705707
706708 # Measure latency (without RSS tracker)
707709 with EmptyEnv (self .get_env ()):
@@ -875,6 +877,7 @@ def loadConfiguration(self, groupKey):
875877 "script" : [<lua scripts that will be executed sequentially>],
876878 "warmup-requests-per-second" : [<requests per second during the warmup run (one entry per lua script)>],
877879 "warmup-duration" : [<duration of the warmup run (one entry per lua script)>],
880+ "requests-per-second" : [<requests per second during the run (one entry per lua script)>],
878881 "duration" : [<duration of the test (one entry per lua script)>]
879882 }
880883 }
@@ -896,6 +899,7 @@ def loadConfiguration(self, groupKey):
896899 script = self .readConfig (group , "script" )
897900 warmupRequestsPerSecond = self .readConfig (group , "warmup-requests-per-second" )
898901 warmupDuration = self .readConfig (group , "warmup-duration" )
902+ requestsPerSecond = self .readConfig (group , "requests-per-second" , optional = True )
899903 duration = self .readConfig (group , "duration" )
900904
901905 scalarScriptValue = self .isScalarValue (script )
@@ -912,6 +916,8 @@ def loadConfiguration(self, groupKey):
912916 result ["warmup-requests-per-second" ] = warmupRequestsPerSecond
913917 result ["warmup-duration" ] = warmupDuration
914918 result ["duration" ] = duration
919+ if requestsPerSecond :
920+ result ["requests-per-second" ] = requestsPerSecond
915921 results .append (result )
916922 else :
917923 count = len (script )
@@ -927,15 +933,19 @@ def loadConfiguration(self, groupKey):
927933 result ["warmup-requests-per-second" ] = warmupRequestsPerSecond [i ]
928934 result ["warmup-duration" ] = warmupDuration [i ]
929935 result ["duration" ] = duration [i ]
936+ if requestsPerSecond :
937+ result ["requests-per-second" ] = requestsPerSecond [i ]
930938 results .append (result )
931939
932940 return results
933941
934- def readConfig (self , config , key ):
942+ def readConfig (self , config , key , optional = False ):
935943 if key in config :
936944 return config [key ]
945+ elif optional :
946+ return None
937947 else :
938- mx .abort (key + " not specified in Wrk configuration." )
948+ mx .abort (f"Mandatory entry { key } not specified in Wrk configuration." )
939949
940950 def isScalarValue (self , value ):
941951 return type (value ) in (int , float , bool ) or isinstance (value , ("" .__class__ , u"" .__class__ )) # pylint: disable=unidiomatic-typecheck
@@ -1012,7 +1022,12 @@ def testLatency(self):
10121022 for i in range (numScripts ):
10131023 # Measure latency using a constant rate (based on the previously measured max throughput).
10141024 config = configs [i ]
1015- expectedRate = int (self .calibratedThroughput [i ] * 0.75 )
1025+ if configs [i ].get ("requests-per-second" ):
1026+ expectedRate = configs [i ]["requests-per-second" ]
1027+ mx .log (f"Using configured fixed throughput { expectedRate } ops/s for latency measurements." )
1028+ else :
1029+ expectedRate = int (self .calibratedThroughput [i ] * 0.75 )
1030+ mx .log (f"Using dynamically computed throughput { expectedRate } ops/s for latency measurements (75% of max throughput)." )
10161031 wrkFlags = self .getLatencyFlags (config , expectedRate )
10171032 constantRateOutput = self .runWrk2 (wrkFlags )
10181033 self .verifyThroughput (constantRateOutput , expectedRate )