diff --git a/scripts/devops_tasks/validate_formatting.py b/scripts/devops_tasks/validate_formatting.py
index 9e34291bc9e3..5e9dc1614f83 100644
--- a/scripts/devops_tasks/validate_formatting.py
+++ b/scripts/devops_tasks/validate_formatting.py
@@ -23,9 +23,12 @@ def run_black(glob_string, service_dir):
     results = []
     logging.info("Running black for {}".format(service_dir))
 
-    discovered_packages = discover_targeted_packages(
-        glob_string, os.path.join(root_dir, "sdk", service_dir)
-    )
+    if service_dir and service_dir != "auto":
+        target_dir = os.path.join(root_dir, "sdk", service_dir)
+    else:
+        target_dir = root_dir
+
+    discovered_packages = discover_targeted_packages(glob_string, target_dir)
 
     for package in discovered_packages:
         package_name = os.path.basename(package)
@@ -43,7 +46,7 @@ def run_black(glob_string, service_dir):
                 "-e",
                 "black",
                 "--",
-                os.path.join("sdk", service_dir, package_name),
+                package,
             ],
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_indirect_attack_simulator.py b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_indirect_attack_simulator.py
index 3ffc559d18a6..32b0f2fa99c1 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_indirect_attack_simulator.py
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_indirect_attack_simulator.py
@@ -191,7 +191,7 @@ async def __call__(
             template_parameters = completed_task.get("template_parameters", {})  # type: ignore
             xpia_attack_type = template_parameters.get("xpia_attack_type", "")  # type: ignore
             action = template_parameters.get("action", "")  # type: ignore
-            document_type = template_parameters.get("document_type", "") # type: ignore
+            document_type = template_parameters.get("document_type", "")  # type: ignore
             sim_results.append(
                 {
                     "messages": completed_task["messages"],  # type: ignore
diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_simulator.py b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_simulator.py
index c004bbfa87d3..835f623612ed 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_simulator.py
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/simulator/_simulator.py
@@ -236,15 +236,19 @@ async def run_simulation(simulation: List[Union[str, Dict[str, Any]]]) -> JsonLi
                 user_turn = Turn(
                     role=ConversationRole.USER,
                     content=str(simulated_turn.get("content")),
-                    context=str(simulated_turn.get("context"))
+                    context=str(simulated_turn.get("context")),
                 )
             else:
-                raise ValueError("Each simulated turn must be a string or a dict with 'content' and 'context' keys")
+                raise ValueError(
+                    "Each simulated turn must be a string or a dict with 'content' and 'context' keys"
+                )
             current_simulation.add_to_history(user_turn)
             assistant_response, assistant_context = await self._get_target_response(
                 target=target, api_call_delay_sec=api_call_delay_sec, conversation_history=current_simulation
             )
-            assistant_turn = Turn(role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context)
+            assistant_turn = Turn(
+                role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context
+            )
             current_simulation.add_to_history(assistant_turn)
             async with progress_bar_lock:
                 progress_bar.update(1)
@@ -286,7 +290,7 @@ async def _extend_conversation_with_simulator(
         prompty_model_config: Dict[str, Any],
         target: Callable,
         progress_bar: tqdm,
-        progress_bar_lock: asyncio.Lock
+        progress_bar_lock: asyncio.Lock,
     ):
         """
         Extends an ongoing conversation using a user simulator until the maximum number of turns is reached.
@@ -329,7 +333,9 @@ async def _extend_conversation_with_simulator(
             assistant_response, assistant_context = await self._get_target_response(
                 target=target, api_call_delay_sec=api_call_delay_sec, conversation_history=current_simulation
             )
-            assistant_turn = Turn(role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context)
+            assistant_turn = Turn(
+                role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context
+            )
             current_simulation.add_to_history(assistant_turn)
             async with progress_bar_lock:
                 progress_bar.update(1)
@@ -667,7 +673,9 @@ async def _complete_conversation(
             assistant_response, assistant_context = await self._get_target_response(
                 target=target, api_call_delay_sec=api_call_delay_sec, conversation_history=conversation_history
             )
-            assistant_turn = Turn(role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context)
+            assistant_turn = Turn(
+                role=ConversationRole.ASSISTANT, content=assistant_response, context=assistant_context
+            )
             conversation_history.add_to_history(assistant_turn)
             progress_bar.update(1)
 
diff --git a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_built_in_evaluator.py b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_built_in_evaluator.py
index 843d0acf258a..de171bf7115e 100644
--- a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_built_in_evaluator.py
+++ b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_built_in_evaluator.py
@@ -59,14 +59,17 @@ def test_retrieval_evaluator_keys(self, mock_model_config):
         conversation = {
             "messages": [
                 {"role": "user", "content": "What is the value of 2 + 2?"},
-                {"role": "assistant", "content": "2 + 2 = 4", "context": {
-                    "citations": [
+                {
+                    "role": "assistant",
+                    "content": "2 + 2 = 4",
+                    "context": {
+                        "citations": [
                             {"id": "math_doc.md", "content": "Information about additions: 1 + 2 = 3, 2 + 2 = 4"}
                         ]
-                    }
-                }
+                    },
+                },
             ]
         }
 
         result = retrieval_eval(conversation=conversation)
-        assert result["retrieval"] == result["gpt_retrieval"] == 1
\ No newline at end of file
+        assert result["retrieval"] == result["gpt_retrieval"] == 1