diff --git a/model_analyzer/config/generate/brute_run_config_generator.py b/model_analyzer/config/generate/brute_run_config_generator.py
index de97655dd..d226811aa 100755
--- a/model_analyzer/config/generate/brute_run_config_generator.py
+++ b/model_analyzer/config/generate/brute_run_config_generator.py
@@ -101,7 +101,8 @@ def get_configs(self) -> Generator[RunConfig, None, None]:
     def _get_next_config(self) -> Generator[RunConfig, None, None]:
         if not self._skip_default_config:
             yield from self._generate_subset(0, default_only=True)
-            yield from self._generate_subset(0, default_only=False)
+
+        yield from self._generate_subset(0, default_only=False)
 
     def _generate_subset(
         self, index: int, default_only: bool
diff --git a/model_analyzer/config/generate/manual_model_config_generator.py b/model_analyzer/config/generate/manual_model_config_generator.py
index 6d4abc631..a95ae27ab 100755
--- a/model_analyzer/config/generate/manual_model_config_generator.py
+++ b/model_analyzer/config/generate/manual_model_config_generator.py
@@ -130,14 +130,7 @@ def _get_next_model_config_variant(self) -> ModelConfigVariant:
 
     def _generate_model_config_variants(self) -> List[List[ModelConfigVariant]]:
         """Generate all model config combinations"""
-        configs = self._generate_all_modes_model_config_variants()
-        return configs
-
-    def _generate_all_modes_model_config_variants(
-        self,
-    ) -> List[List[ModelConfigVariant]]:
-        """Generate model configs for all modes"""
         model_config_variants = []
 
         for param_combo in self._non_max_batch_size_param_combos:
             configs_with_max_batch_size = []
diff --git a/qa/L0_config_search/test.sh b/qa/L0_config_search/test.sh
index 347f31efd..a029d3682 100755
--- a/qa/L0_config_search/test.sh
+++ b/qa/L0_config_search/test.sh
@@ -70,9 +70,6 @@ for launch_mode in $TRITON_LAUNCH_MODES; do
     MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS $MODEL_ANALYZER_PORTS"
 
     if [ $launch_mode == 'remote' ]; then
-        NUM_ROW_OUTPUT_FILE=`echo $config | sed 's/\.yml//'`-param-$launch_mode.txt
-        NUM_MODELS_OUTPUT_FILE=`echo $config | sed 's/\.yml//'`-models-$launch_mode.txt
-
         # For remote launch, set server args and start server
         SERVER=`which tritonserver`
         SERVER_ARGS="--model-repository=$MODEL_REPOSITORY --model-control-mode=explicit --http-port $http_port --grpc-port $grpc_port --metrics-port $metrics_port"
diff --git a/qa/L0_config_search/test_config_generator.py b/qa/L0_config_search/test_config_generator.py
index 6fa72f912..79145d56c 100755
--- a/qa/L0_config_search/test_config_generator.py
+++ b/qa/L0_config_search/test_config_generator.py
@@ -70,7 +70,7 @@ def generate_search_disable(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 2, 2, model_config)
+        self._write_file(total_param_count, 2, model_config)
 
     def generate_max_limit_with_model_config(self):
         concurrency_count = 2
@@ -91,7 +91,7 @@ def generate_max_limit_with_model_config(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 2, 2, model_config)
+        self._write_file(total_param_count, 2, model_config)
 
     def generate_max_limit(self):
         concurrency_count = 2
@@ -105,7 +105,7 @@ def generate_max_limit(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 8, 8, model_config)
+        self._write_file(total_param_count, 8, model_config)
 
     def generate_max_limit_with_param(self):
         concurrency_count = 1  # 1 because concurrency parameter is 1 entry below
@@ -124,7 +124,7 @@ def generate_max_limit_with_param(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 6, 6, model_config)
+        self._write_file(total_param_count, 6, model_config)
 
     def generate_max_limit_with_param_and_model_config(self):
         concurrency_count = 1  # 1 because concurrency parameter is 1 entry below
@@ -146,7 +146,7 @@ def generate_max_limit_with_param_and_model_config(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 2, 2, model_config)
+        self._write_file(total_param_count, 2, model_config)
 
     def generate_max_limit_with_dynamic_batch_disable(self):
         concurrency_count = 2
@@ -160,7 +160,7 @@ def generate_max_limit_with_dynamic_batch_disable(self):
         total_param_count = self._calculate_total_params(
             concurrency_count, instance_count
         )
-        self._write_file(total_param_count, total_param_count, 4, 4, model_config)
+        self._write_file(total_param_count, 4, model_config)
 
     def _calculate_total_params(
         self, concurrency_count, instance_count, default_config_count=1
@@ -178,19 +178,13 @@ def _calculate_total_params(
     def _write_file(
         self,
         total_param,
-        total_param_remote,
         total_models,
-        total_models_remote,
         model_config,
     ):
         with open(f"./config-{self.test_id}-param.txt", "w") as file:
             file.write(str(total_param))
-        with open(f"./config-{self.test_id}-param-remote.txt", "w") as file:
-            file.write(str(total_param_remote))
         with open(f"./config-{self.test_id}-models.txt", "w") as file:
             file.write(str(total_models))
-        with open(f"./config-{self.test_id}-models-remote.txt", "w") as file:
-            file.write(str(total_models_remote))
         with open(f"./config-{self.test_id}.yml", "w") as file:
             yaml.dump(model_config, file)
 