From 79f8e2697319b4d54670498e965687cbebc220f8 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 15 Jan 2025 15:40:29 -0800 Subject: [PATCH 01/15] check for edge query --- src/groundlight/client.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 0a8d4626..712fce74 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -999,6 +999,15 @@ def wait_for_confident_result( confidence_threshold = self.get_detector(image_query.detector_id).confidence_threshold confidence_above_thresh = partial(iq_is_confident, confidence_threshold=confidence_threshold) # type: ignore + + def is_from_edge(iq: ImageQuery) -> bool: + return iq.metadata and iq.metadata.get("is_from_edge", False) + + if is_from_edge(image_query) and not confidence_above_thresh(image_query): + # If the query is from the edge and the confidence is not above the threshold, it means the client wants + # only edge answers, so we don't want to poll the cloud. + return image_query + return self._wait_for_result(image_query, condition=confidence_above_thresh, timeout_sec=timeout_sec) def wait_for_ml_result(self, image_query: Union[ImageQuery, str], timeout_sec: float = 30.0) -> ImageQuery: From be0218945e86354b65ebde03c6e80381929e8149 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 15 Jan 2025 16:50:44 -0800 Subject: [PATCH 02/15] move to _wait_for_result and don't short circuit the waiting --- src/groundlight/client.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 712fce74..167e3b75 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -1000,14 +1000,6 @@ def wait_for_confident_result( confidence_above_thresh = partial(iq_is_confident, confidence_threshold=confidence_threshold) # type: ignore - def is_from_edge(iq: ImageQuery) -> bool: - return iq.metadata and iq.metadata.get("is_from_edge", False) - - if is_from_edge(image_query) and not confidence_above_thresh(image_query): - # If the query is from the edge and the confidence is not above the threshold, it means the client wants - # only edge answers, so we don't want to poll the cloud. - return image_query - return self._wait_for_result(image_query, condition=confidence_above_thresh, timeout_sec=timeout_sec) def wait_for_ml_result(self, image_query: Union[ImageQuery, str], timeout_sec: float = 30.0) -> ImageQuery: @@ -1082,8 +1074,21 @@ def _wait_for_result( logger.debug(f"Polling ({target_delay:.1f}/{timeout_sec:.0f}s) {image_query} until result is available") time.sleep(sleep_time) next_delay *= self.POLLING_EXPONENTIAL_BACKOFF - image_query = self.get_image_query(image_query.id) - image_query = self._fixup_image_query(image_query) + + def is_from_edge(iq: ImageQuery) -> bool: + return iq.metadata and iq.metadata.get("is_from_edge", False) + + if is_from_edge(image_query) and not condition(image_query): + # If the query is from the edge and the condition is not met, it means the client wanted only edge + # answers, so we don't want to poll the cloud and we should eventually return whatever the edge response + # was. We'll wait the remaining time to stay consistent with the behavior of the wait parameter. + logger.debug( + "The image query is from the edge and the client wanted only edge answers, so we are not" + " attempting to get a result from the cloud." 
+ ) + else: + image_query = self.get_image_query(image_query.id) + image_query = self._fixup_image_query(image_query) return image_query def add_label( From b69b244b547ce888eba50b12904bae1e6d80d77c Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 15 Jan 2025 17:17:38 -0800 Subject: [PATCH 03/15] remove line --- src/groundlight/client.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 167e3b75..42fe2016 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -999,7 +999,6 @@ def wait_for_confident_result( confidence_threshold = self.get_detector(image_query.detector_id).confidence_threshold confidence_above_thresh = partial(iq_is_confident, confidence_threshold=confidence_threshold) # type: ignore - return self._wait_for_result(image_query, condition=confidence_above_thresh, timeout_sec=timeout_sec) def wait_for_ml_result(self, image_query: Union[ImageQuery, str], timeout_sec: float = 30.0) -> ImageQuery: From bafb4a64cd34396b9849cc934df19692941a3e3c Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Thu, 20 Feb 2025 15:09:21 -0800 Subject: [PATCH 04/15] this doesn't work because the cloud iq escalated from the edge will look the same --- src/groundlight/client.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 23e06058..c213cfaf 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -997,6 +997,18 @@ def wait_for_confident_result( :meth:`get_image_query` for checking result status without blocking :meth:`wait_for_ml_result` for waiting until the first ML result is available """ + + def is_from_edge(iq: ImageQuery) -> bool: + return iq.metadata and iq.metadata.get("is_from_edge", False) + + if is_from_edge(image_query): + # If the query is from the edge, there is nothing to wait for. + logger.debug( + "The image query is from the edge and the client wanted only edge answers, so we are not" + " attempting to get a result from the cloud." + ) + return image_query + if isinstance(image_query, str): image_query = self.get_image_query(image_query) confidence_threshold = self.get_detector(image_query.detector_id).confidence_threshold @@ -1076,21 +1088,8 @@ def _wait_for_result( logger.debug(f"Polling ({target_delay:.1f}/{timeout_sec:.0f}s) {image_query} until result is available") time.sleep(sleep_time) next_delay *= self.POLLING_EXPONENTIAL_BACKOFF - - def is_from_edge(iq: ImageQuery) -> bool: - return iq.metadata and iq.metadata.get("is_from_edge", False) - - if is_from_edge(image_query) and not condition(image_query): - # If the query is from the edge and the condition is not met, it means the client wanted only edge - # answers, so we don't want to poll the cloud and we should eventually return whatever the edge response - # was. We'll wait the remaining time to stay consistent with the behavior of the wait parameter. - logger.debug( - "The image query is from the edge and the client wanted only edge answers, so we are not" - " attempting to get a result from the cloud." 
- ) - else: - image_query = self.get_image_query(image_query.id) - image_query = self._fixup_image_query(image_query) + image_query = self.get_image_query(image_query.id) + image_query = self._fixup_image_query(image_query) return image_query def add_label( From ac9399496a3f59bf817cdd6983766711d0485ae3 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 5 Mar 2025 22:47:37 +0000 Subject: [PATCH 05/15] update code based on latest backend changes --- generated/docs/BinaryClassificationResult.md | 1 + generated/docs/CountingResult.md | 1 + generated/docs/LabelValueRequest.md | 2 +- generated/docs/MultiClassificationResult.md | 1 + generated/docs/SourceEnum.md | 4 +- generated/docs/TextRecognitionResult.md | 3 +- .../model/binary_classification_result.py | 8 + .../model/counting_result.py | 8 + .../model/label_value_request.py | 9 +- .../model/multi_classification_result.py | 8 + .../model/patched_detector_request.py | 151 +++++++----------- .../model/source_enum.py | 9 +- .../model/text_recognition_result.py | 17 +- generated/model.py | 29 +++- spec/public-api.yaml | 29 +++- 15 files changed, 169 insertions(+), 111 deletions(-) diff --git a/generated/docs/BinaryClassificationResult.md b/generated/docs/BinaryClassificationResult.md index 40cf272e..4a9fb8fb 100644 --- a/generated/docs/BinaryClassificationResult.md +++ b/generated/docs/BinaryClassificationResult.md @@ -7,6 +7,7 @@ Name | Type | Description | Notes **label** | **str** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] +**result_type** | **str** | | [optional] if omitted the server will use the default value of "binary_classification" **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/CountingResult.md b/generated/docs/CountingResult.md index 3af68072..a79620db 100644 --- a/generated/docs/CountingResult.md +++ b/generated/docs/CountingResult.md @@ -7,6 +7,7 @@ Name | Type | Description | Notes **count** | **int, none_type** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] +**result_type** | **str** | | [optional] if omitted the server will use the default value of "counting" **greater_than_max** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/LabelValueRequest.md b/generated/docs/LabelValueRequest.md index a6934ab5..baa15c2f 100644 --- a/generated/docs/LabelValueRequest.md +++ b/generated/docs/LabelValueRequest.md @@ -4,7 +4,7 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**label** | **str** | | +**label** | **str, none_type** | | **image_query_id** | **str** | | **rois** | [**[ROIRequest], none_type**](ROIRequest.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/MultiClassificationResult.md b/generated/docs/MultiClassificationResult.md index 5bd3cfc4..8b928073 100644 --- a/generated/docs/MultiClassificationResult.md +++ 
b/generated/docs/MultiClassificationResult.md @@ -7,6 +7,7 @@ Name | Type | Description | Notes **label** | **str** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] +**result_type** | **str** | | [optional] if omitted the server will use the default value of "multi_classification" **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/SourceEnum.md b/generated/docs/SourceEnum.md index d2b0b9af..65fe757d 100644 --- a/generated/docs/SourceEnum.md +++ b/generated/docs/SourceEnum.md @@ -1,11 +1,11 @@ # SourceEnum -* `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear +* `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear | must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] +**value** | **str** | * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE | must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/TextRecognitionResult.md b/generated/docs/TextRecognitionResult.md index d50b80df..0d5aeedb 100644 --- a/generated/docs/TextRecognitionResult.md +++ b/generated/docs/TextRecognitionResult.md @@ -4,10 +4,11 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**text** | **str** | | +**text** | **str, none_type** | | **truncated** | **bool** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] +**result_type** | **str** | | [optional] if omitted the server will use the default value of "text_recognition" **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/groundlight_openapi_client/model/binary_classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py index d4f5ebc4..53c63b1b 100644 --- 
a/generated/groundlight_openapi_client/model/binary_classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -65,6 +65,10 @@ class BinaryClassificationResult(ModelNormal): "USER": "USER", "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", "ALGORITHM": "ALGORITHM", + "EDGE": "EDGE", + }, + ("result_type",): { + "BINARY_CLASSIFICATION": "binary_classification", }, } @@ -112,6 +116,7 @@ def openapi_types(): none_type, ), # noqa: E501 "source": (str,), # noqa: E501 + "result_type": (str,), # noqa: E501 } @cached_property @@ -122,6 +127,7 @@ def discriminator(): "label": "label", # noqa: E501 "confidence": "confidence", # noqa: E501 "source": "source", # noqa: E501 + "result_type": "result_type", # noqa: E501 } read_only_vars = {} @@ -169,6 +175,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "binary_classification" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -259,6 +266,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "binary_classification" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/counting_result.py b/generated/groundlight_openapi_client/model/counting_result.py index 953a1acf..00dfc34a 100644 --- a/generated/groundlight_openapi_client/model/counting_result.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -60,6 +60,10 @@ class CountingResult(ModelNormal): "USER": "USER", "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", "ALGORITHM": "ALGORITHM", + "EDGE": "EDGE", + }, + ("result_type",): { + "COUNTING": "counting", }, } @@ -113,6 +117,7 @@ def openapi_types(): none_type, ), # noqa: E501 "source": (str,), # noqa: E501 + "result_type": (str,), # noqa: E501 "greater_than_max": (bool,), # noqa: E501 } @@ -124,6 +129,7 @@ def discriminator(): "count": "count", # noqa: E501 "confidence": "confidence", # noqa: E501 "source": "source", # noqa: E501 + "result_type": "result_type", # noqa: E501 "greater_than_max": "greater_than_max", # noqa: E501 } @@ -172,6 +178,7 @@ def _from_openapi_data(cls, count, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "counting" # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ @@ -263,6 +270,7 @@ def __init__(self, count, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "counting" # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py index 6a25d37f..434f8ad8 100644 --- a/generated/groundlight_openapi_client/model/label_value_request.py +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ 
-100,7 +100,10 @@ def openapi_types(): """ lazy_import() return { - "label": (str,), # noqa: E501 + "label": ( + str, + none_type, + ), # noqa: E501 "image_query_id": (str,), # noqa: E501 "rois": ( [ROIRequest], @@ -128,7 +131,7 @@ def _from_openapi_data(cls, label, image_query_id, *args, **kwargs): # noqa: E5 """LabelValueRequest - a model defined in OpenAPI Args: - label (str): + label (str, none_type): image_query_id (str): Keyword Args: @@ -219,7 +222,7 @@ def __init__(self, label, image_query_id, *args, **kwargs): # noqa: E501 """LabelValueRequest - a model defined in OpenAPI Args: - label (str): + label (str, none_type): image_query_id (str): Keyword Args: diff --git a/generated/groundlight_openapi_client/model/multi_classification_result.py b/generated/groundlight_openapi_client/model/multi_classification_result.py index 177574a7..97c66110 100644 --- a/generated/groundlight_openapi_client/model/multi_classification_result.py +++ b/generated/groundlight_openapi_client/model/multi_classification_result.py @@ -60,6 +60,10 @@ class MultiClassificationResult(ModelNormal): "USER": "USER", "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", "ALGORITHM": "ALGORITHM", + "EDGE": "EDGE", + }, + ("result_type",): { + "MULTI_CLASSIFICATION": "multi_classification", }, } @@ -107,6 +111,7 @@ def openapi_types(): none_type, ), # noqa: E501 "source": (str,), # noqa: E501 + "result_type": (str,), # noqa: E501 } @cached_property @@ -117,6 +122,7 @@ def discriminator(): "label": "label", # noqa: E501 "confidence": "confidence", # noqa: E501 "source": "source", # noqa: E501 + "result_type": "result_type", # noqa: E501 } read_only_vars = {} @@ -164,6 +170,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "multi_classification" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -254,6 +261,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "multi_classification" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 251cb75d..64534047 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -8,6 +8,7 @@ Generated by: https://openapi-generator.tech """ + import re # noqa: F401 import sys # noqa: F401 @@ -24,7 +25,7 @@ file_type, none_type, validate_get_composed_info, - OpenApiModel, + OpenApiModel ) from groundlight_openapi_client.exceptions import ApiAttributeError @@ -33,10 +34,9 @@ def lazy_import(): from groundlight_openapi_client.model.blank_enum import BlankEnum from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.status_enum import StatusEnum - - globals()["BlankEnum"] = BlankEnum - globals()["EscalationTypeEnum"] = EscalationTypeEnum - globals()["StatusEnum"] = StatusEnum + globals()['BlankEnum'] = BlankEnum + globals()['EscalationTypeEnum'] = EscalationTypeEnum + globals()['StatusEnum'] = StatusEnum 
class PatchedDetectorRequest(ModelNormal): @@ -63,20 +63,21 @@ class PatchedDetectorRequest(ModelNormal): as additional properties values. """ - allowed_values = {} + allowed_values = { + } validations = { - ("name",): { - "max_length": 200, - "min_length": 1, + ('name',): { + 'max_length': 200, + 'min_length': 1, }, - ("confidence_threshold",): { - "inclusive_maximum": 1.0, - "inclusive_minimum": 0.0, + ('confidence_threshold',): { + 'inclusive_maximum': 1.0, + 'inclusive_minimum': 0.0, }, - ("patience_time",): { - "inclusive_maximum": 3600, - "inclusive_minimum": 0, + ('patience_time',): { + 'inclusive_maximum': 3600, + 'inclusive_minimum': 0, }, } @@ -87,17 +88,7 @@ def additional_properties_type(): of type self, this must run after the class is loaded """ lazy_import() - return ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ) # noqa: E501 + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @@ -113,46 +104,28 @@ def openapi_types(): """ lazy_import() return { - "name": (str,), # noqa: E501 - "confidence_threshold": (float,), # noqa: E501 - "patience_time": (float,), # noqa: E501 - "status": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 - "escalation_type": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 + 'name': (str,), # noqa: E501 + 'confidence_threshold': (float,), # noqa: E501 + 'patience_time': (float,), # noqa: E501 + 'status': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 + 'escalation_type': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 } @cached_property def discriminator(): return None + attribute_map = { - "name": "name", # noqa: E501 - "confidence_threshold": "confidence_threshold", # noqa: E501 - "patience_time": "patience_time", # noqa: E501 - "status": "status", # noqa: E501 - "escalation_type": "escalation_type", # noqa: E501 + 'name': 'name', # noqa: E501 + 'confidence_threshold': 'confidence_threshold', # noqa: E501 + 'patience_time': 'patience_time', # noqa: E501 + 'status': 'status', # noqa: E501 + 'escalation_type': 'escalation_type', # noqa: E501 } - read_only_vars = {} + read_only_vars = { + } _composed_schemas = {} @@ -199,18 +172,17 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 """ - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." - % ( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), @@ -226,24 +198,22 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): - if ( - var_name not in self.attribute_map - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self.additional_properties_type is None - ): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ - "_data_store", - "_check_type", - "_spec_property_naming", - "_path_to_item", - "_configuration", - "_visited_composed_classes", + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', ]) @convert_js_args_to_python_args @@ -288,16 +258,15 @@ def __init__(self, *args, **kwargs): # noqa: E501 escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 """ - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." - % ( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), @@ -313,17 +282,13 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): - if ( - var_name not in self.attribute_map - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self.additional_properties_type is None - ): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: - raise ApiAttributeError( - f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " - "class with read only attributes." - ) + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py index aee5be85..4addbc10 100644 --- a/generated/groundlight_openapi_client/model/source_enum.py +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -58,6 +58,7 @@ class SourceEnum(ModelSimple): "ALG": "ALG", "ALG_REC": "ALG_REC", "ALG_UNCLEAR": "ALG_UNCLEAR", + "EDGE": "EDGE", }, } @@ -107,10 +108,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 Keyword Args: - value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -199,10 +200,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. 
Args: - args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 Keyword Args: - value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. diff --git a/generated/groundlight_openapi_client/model/text_recognition_result.py b/generated/groundlight_openapi_client/model/text_recognition_result.py index ca44ff65..9b12e960 100644 --- a/generated/groundlight_openapi_client/model/text_recognition_result.py +++ b/generated/groundlight_openapi_client/model/text_recognition_result.py @@ -60,6 +60,10 @@ class TextRecognitionResult(ModelNormal): "USER": "USER", "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", "ALGORITHM": "ALGORITHM", + "EDGE": "EDGE", + }, + ("result_type",): { + "TEXT_RECOGNITION": "text_recognition", }, } @@ -101,13 +105,17 @@ def openapi_types(): and the value is attribute type. 
""" return { - "text": (str,), # noqa: E501 + "text": ( + str, + none_type, + ), # noqa: E501 "truncated": (bool,), # noqa: E501 "confidence": ( float, none_type, ), # noqa: E501 "source": (str,), # noqa: E501 + "result_type": (str,), # noqa: E501 } @cached_property @@ -119,6 +127,7 @@ def discriminator(): "truncated": "truncated", # noqa: E501 "confidence": "confidence", # noqa: E501 "source": "source", # noqa: E501 + "result_type": "result_type", # noqa: E501 } read_only_vars = {} @@ -131,7 +140,7 @@ def _from_openapi_data(cls, text, truncated, *args, **kwargs): # noqa: E501 """TextRecognitionResult - a model defined in OpenAPI Args: - text (str): + text (str, none_type): truncated (bool): Keyword Args: @@ -167,6 +176,7 @@ def _from_openapi_data(cls, text, truncated, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "text_recognition" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -223,7 +233,7 @@ def __init__(self, text, truncated, *args, **kwargs): # noqa: E501 """TextRecognitionResult - a model defined in OpenAPI Args: - text (str): + text (str, none_type): truncated (bool): Keyword Args: @@ -259,6 +269,7 @@ def __init__(self, text, truncated, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 + result_type (str): [optional] if omitted the server will use the default value of "text_recognition" # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/model.py b/generated/model.py index 767e43c9..d2df2890 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-02-25T19:28:25+00:00 +# timestamp: 2025-03-05T22:46:43+00:00 from __future__ import annotations @@ -168,6 +168,7 @@ class SourceEnum(str, Enum): * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear + * `EDGE` - EDGE """ INITIAL_PLACEHOLDER = "INITIAL_PLACEHOLDER" @@ -177,6 +178,7 @@ class SourceEnum(str, Enum): ALG = "ALG" ALG_REC = "ALG_REC" ALG_UNCLEAR = "ALG_UNCLEAR" + EDGE = "EDGE" class StatusEnum(str, Enum): @@ -223,6 +225,11 @@ class Source(str, Enum): USER = "USER" CLOUD_ENSEMBLE = "CLOUD_ENSEMBLE" ALGORITHM = "ALGORITHM" + EDGE = "EDGE" + + +class ResultType(str, Enum): + binary_classification = "binary_classification" class Label(str, Enum): @@ -234,26 +241,42 @@ class Label(str, Enum): class BinaryClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None source: Optional[Source] = None + result_type: Optional[ResultType] = None label: Label +class ResultType2(str, Enum): + counting = "counting" + + class CountingResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None source: Optional[Source] = None + result_type: Optional[ResultType2] = None count: Optional[conint(ge=0)] = Field(...) 
greater_than_max: Optional[bool] = None +class ResultType3(str, Enum): + multi_classification = "multi_classification" + + class MultiClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None source: Optional[Source] = None + result_type: Optional[ResultType3] = None label: str +class ResultType4(str, Enum): + text_recognition = "text_recognition" + + class TextRecognitionResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None source: Optional[Source] = None - text: str + result_type: Optional[ResultType4] = None + text: Optional[str] = Field(...) truncated: bool @@ -423,7 +446,7 @@ class LabelValue(BaseModel): class LabelValueRequest(BaseModel): - label: str + label: Optional[str] = Field(...) image_query_id: constr(min_length=1) rois: Optional[List[ROIRequest]] = None diff --git a/spec/public-api.yaml b/spec/public-api.yaml index d09fec91..94785eb9 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1178,6 +1178,7 @@ components: label: type: string writeOnly: true + nullable: true image_query_id: type: string writeOnly: true @@ -1508,6 +1509,7 @@ components: - ALG - ALG_REC - ALG_UNCLEAR + - EDGE type: string description: |- * `INITIAL_PLACEHOLDER` - InitialPlaceholder @@ -1517,6 +1519,7 @@ components: * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear + * `EDGE` - EDGE StatusEnum: enum: - 'ON' @@ -1587,6 +1590,11 @@ components: - USER - CLOUD_ENSEMBLE - ALGORITHM + - EDGE + result_type: + type: string + enum: + - binary_classification label: type: string enum: @@ -1612,6 +1620,11 @@ components: - USER - CLOUD_ENSEMBLE - ALGORITHM + - EDGE + result_type: + type: string + enum: + - counting count: type: integer minimum: 0 @@ -1637,8 +1650,14 @@ components: - USER - CLOUD_ENSEMBLE - ALGORITHM + - EDGE + result_type: + type: string + enum: + - multi_classification label: type: string + nullable: false required: - label TextRecognitionResult: @@ -1658,8 +1677,14 @@ components: - USER - CLOUD_ENSEMBLE - ALGORITHM + - EDGE + result_type: + type: string + enum: + - text_recognition text: type: string + nullable: true truncated: type: boolean required: @@ -1675,6 +1700,7 @@ components: nullable: false class_name: type: string + nullable: false required: - class_name MultiClassModeConfiguration: @@ -1684,6 +1710,7 @@ components: type: array items: type: string + nullable: false num_classes: type: integer nullable: false @@ -1732,4 +1759,4 @@ servers: - url: https://device.positronix.ai/device-api description: Device Prod - url: https://device.integ.positronix.ai/device-api - description: Device Integ + description: Device Integ \ No newline at end of file From 603b99053b7713bd65b098384aa5de93b8f5e3bb Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 5 Mar 2025 23:09:29 +0000 Subject: [PATCH 06/15] a better approach --- src/groundlight/client.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 261abe31..b2dd37c9 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -25,6 +25,7 @@ ImageQuery, PaginatedDetectorList, PaginatedImageQueryList, + Source, ) from urllib3.exceptions import InsecureRequestWarning @@ -969,6 +970,7 @@ def wait_for_confident_result( 1. A result with confidence >= confidence_threshold is available 2. The timeout_sec is reached 3. An error occurs + If the image query is from the edge, the result is returned immediately and not waited for. 
**Example usage**:: @@ -1002,17 +1004,13 @@ def wait_for_confident_result( :meth:`get_image_query` for checking result status without blocking :meth:`wait_for_ml_result` for waiting until the first ML result is available """ - - def is_from_edge(iq: ImageQuery) -> bool: - return iq.metadata and iq.metadata.get("is_from_edge", False) - - if is_from_edge(image_query): - # If the query is from the edge, there is nothing to wait for. - logger.debug( - "The image query is from the edge and the client wanted only edge answers, so we are not" - " attempting to get a result from the cloud." - ) - return image_query + if isinstance(image_query, ImageQuery): + if image_query.result and image_query.result.source and image_query.result.source == Source.EDGE: + logger.debug( + "The image query is from the edge, so we are returning it immediately and not waiting for a " + "confident result." + ) + return image_query if isinstance(image_query, str): image_query = self.get_image_query(image_query) From 92c9598a34af879ea54d3d56fdcea7c48810b2c6 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 10:37:38 -0700 Subject: [PATCH 07/15] s --- .../model/patched_detector_request.py | 167 +++++++++++------- spec/public-api.yaml | 2 +- 2 files changed, 103 insertions(+), 66 deletions(-) diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 64534047..5274443b 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -1,14 +1,13 @@ """ - Groundlight API +Groundlight API - Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 +Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech +The version of the OpenAPI document: 0.18.2 +Contact: support@groundlight.ai +Generated by: https://openapi-generator.tech """ - import re # noqa: F401 import sys # noqa: F401 @@ -25,7 +24,7 @@ file_type, none_type, validate_get_composed_info, - OpenApiModel + OpenApiModel, ) from groundlight_openapi_client.exceptions import ApiAttributeError @@ -34,9 +33,10 @@ def lazy_import(): from groundlight_openapi_client.model.blank_enum import BlankEnum from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.status_enum import StatusEnum - globals()['BlankEnum'] = BlankEnum - globals()['EscalationTypeEnum'] = EscalationTypeEnum - globals()['StatusEnum'] = StatusEnum + + globals()["BlankEnum"] = BlankEnum + globals()["EscalationTypeEnum"] = EscalationTypeEnum + globals()["StatusEnum"] = StatusEnum class PatchedDetectorRequest(ModelNormal): @@ -63,21 +63,20 @@ class PatchedDetectorRequest(ModelNormal): as additional properties values. 
""" - allowed_values = { - } + allowed_values = {} validations = { - ('name',): { - 'max_length': 200, - 'min_length': 1, + ("name",): { + "max_length": 200, + "min_length": 1, }, - ('confidence_threshold',): { - 'inclusive_maximum': 1.0, - 'inclusive_minimum': 0.0, + ("confidence_threshold",): { + "inclusive_maximum": 1.0, + "inclusive_minimum": 0.0, }, - ('patience_time',): { - 'inclusive_maximum': 3600, - 'inclusive_minimum': 0, + ("patience_time",): { + "inclusive_maximum": 3600, + "inclusive_minimum": 0, }, } @@ -88,7 +87,17 @@ def additional_properties_type(): of type self, this must run after the class is loaded """ lazy_import() - return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 _nullable = False @@ -104,28 +113,46 @@ def openapi_types(): """ lazy_import() return { - 'name': (str,), # noqa: E501 - 'confidence_threshold': (float,), # noqa: E501 - 'patience_time': (float,), # noqa: E501 - 'status': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 - 'escalation_type': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 + "name": (str,), # noqa: E501 + "confidence_threshold": (float,), # noqa: E501 + "patience_time": (float,), # noqa: E501 + "status": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "escalation_type": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 } @cached_property def discriminator(): return None - attribute_map = { - 'name': 'name', # noqa: E501 - 'confidence_threshold': 'confidence_threshold', # noqa: E501 - 'patience_time': 'patience_time', # noqa: E501 - 'status': 'status', # noqa: E501 - 'escalation_type': 'escalation_type', # noqa: E501 + "name": "name", # noqa: E501 + "confidence_threshold": "confidence_threshold", # noqa: E501 + "patience_time": "patience_time", # noqa: E501 + "status": "status", # noqa: E501 + "escalation_type": "escalation_type", # noqa: E501 } - read_only_vars = { - } + read_only_vars = {} _composed_schemas = {} @@ -172,17 +199,18 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 """ - _check_type = kwargs.pop('_check_type', True) - _spec_property_naming = kwargs.pop('_spec_property_naming', False) - _path_to_item = kwargs.pop('_path_to_item', ()) - _configuration = kwargs.pop('_configuration', None) - _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( args, self.__class__.__name__, ), @@ -198,23 +226,27 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): - if var_name not in self.attribute_map and \ - self._configuration is not None and \ - self._configuration.discard_unknown_keys and \ - self.additional_properties_type is None: + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): # discard variable. continue setattr(self, var_name, var_value) return self - required_properties = set([ - '_data_store', - '_check_type', - '_spec_property_naming', - '_path_to_item', - '_configuration', - '_visited_composed_classes', - ]) + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 @@ -258,15 +290,16 @@ def __init__(self, *args, **kwargs): # noqa: E501 escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 """ - _check_type = kwargs.pop('_check_type', True) - _spec_property_naming = kwargs.pop('_spec_property_naming', False) - _path_to_item = kwargs.pop('_path_to_item', ()) - _configuration = kwargs.pop('_configuration', None) - _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) if args: raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( args, self.__class__.__name__, ), @@ -282,13 +315,17 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): - if var_name not in self.attribute_map and \ - self._configuration is not None and \ - self._configuration.discard_unknown_keys and \ - self.additional_properties_type is None: + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: - raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " - f"class with read only attributes.") + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." 
+ ) diff --git a/spec/public-api.yaml b/spec/public-api.yaml index fba49135..bb11e4ff 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1764,4 +1764,4 @@ servers: - url: https://device.positronix.ai/device-api description: Device Prod - url: https://device.integ.positronix.ai/device-api - description: Device Integ \ No newline at end of file + description: Device Integ From f758c76ff8d3a611461be4a0b98505e7219a600b Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 10:38:28 -0700 Subject: [PATCH 08/15] s2 --- .../model/patched_detector_request.py | 30 +++++++++---------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 5274443b..35c6f42c 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -1,11 +1,11 @@ """ -Groundlight API + Groundlight API -Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 -The version of the OpenAPI document: 0.18.2 -Contact: support@groundlight.ai -Generated by: https://openapi-generator.tech + The version of the OpenAPI document: 0.18.2 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech """ import re # noqa: F401 @@ -237,16 +237,14 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 setattr(self, var_name, var_value) return self - required_properties = set( - [ - "_data_store", - "_check_type", - "_spec_property_naming", - "_path_to_item", - "_configuration", - "_visited_composed_classes", - ] - ) + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 @@ -328,4 +326,4 @@ def __init__(self, *args, **kwargs): # noqa: E501 raise ApiAttributeError( f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " "class with read only attributes." - ) + ) \ No newline at end of file From 272869ca3531841d3856367d99e5d316373a50d9 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 10:38:43 -0700 Subject: [PATCH 09/15] s3 --- .../model/patched_detector_request.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 35c6f42c..251cb75d 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -326,4 +326,4 @@ def __init__(self, *args, **kwargs): # noqa: E501 raise ApiAttributeError( f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " "class with read only attributes." 
- ) \ No newline at end of file + ) From 40bbe11c4ff9d2d980056cb034e0ff2b51b580e3 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 10:53:38 -0700 Subject: [PATCH 10/15] use done_processing and EDGE source --- src/groundlight/client.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index b2dd37c9..a8bf080b 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -1004,17 +1004,29 @@ def wait_for_confident_result( :meth:`get_image_query` for checking result status without blocking :meth:`wait_for_ml_result` for waiting until the first ML result is available """ - if isinstance(image_query, ImageQuery): + + def should_stop_waiting(image_query: ImageQuery) -> bool: + """Checks if the image query should be returned immediately because no better answer is expected.""" + if image_query.done_processing: + logger.debug( + "The image query has completed escalating and will receive no new results, so we are " + "returning it immediately." + ) + return True if image_query.result and image_query.result.source and image_query.result.source == Source.EDGE: logger.debug( - "The image query is from the edge, so we are returning it immediately and not waiting for a " - "confident result." + "The image query was answered on the edge, so we are returning it immediately and not waiting for " + "a confident result." ) - return image_query + return True + return False if isinstance(image_query, str): image_query = self.get_image_query(image_query) + if should_stop_waiting(image_query): + return image_query + if confidence_threshold is None: confidence_threshold = self.get_detector(image_query.detector_id).confidence_threshold From d40f6cbf727de59bf55a3b72a3e6cd6d67bc47d7 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 10:54:16 -0700 Subject: [PATCH 11/15] update docstring --- src/groundlight/client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index a8bf080b..db88b283 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -970,7 +970,8 @@ def wait_for_confident_result( 1. A result with confidence >= confidence_threshold is available 2. The timeout_sec is reached 3. An error occurs - If the image query is from the edge, the result is returned immediately and not waited for. + + If the image query was answered on the edge or is done_processing, the result is returned immediately. 
**Example usage**:: From 0f369314d3a615c51e04dce9052888ae1e2c60f7 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 11:30:22 -0700 Subject: [PATCH 12/15] test for wait behavior --- test/unit/test_wait.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 test/unit/test_wait.py diff --git a/test/unit/test_wait.py b/test/unit/test_wait.py new file mode 100644 index 00000000..11e70a4e --- /dev/null +++ b/test/unit/test_wait.py @@ -0,0 +1,19 @@ +from groundlight import ExperimentalApi +from unittest.mock import patch +from model import ImageQuery, Source, BinaryClassificationResult, Label + + +def test_wait_for_confident_result_returns_immediately_when_no_better_result_expected( + gl_experimental: ExperimentalApi, initial_iq: ImageQuery +): + with patch.object(gl_experimental, "_wait_for_result") as mock_wait_for_result: + # Should not wait if the image query is done processing + initial_iq.done_processing = True + gl_experimental.wait_for_confident_result(initial_iq) + mock_wait_for_result.assert_not_called() + + # Should not wait if the result is from the edge + initial_iq.done_processing = False + initial_iq.result = BinaryClassificationResult(source=Source.EDGE, label=Label.YES) + gl_experimental.wait_for_confident_result(initial_iq) + mock_wait_for_result.assert_not_called() From b5fa8448699ef1632ee687d746a1039922703f61 Mon Sep 17 00:00:00 2001 From: Auto-format Bot Date: Wed, 12 Mar 2025 18:31:11 +0000 Subject: [PATCH 13/15] Automatically reformatting code --- test/unit/test_wait.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/unit/test_wait.py b/test/unit/test_wait.py index 11e70a4e..000d1b88 100644 --- a/test/unit/test_wait.py +++ b/test/unit/test_wait.py @@ -1,6 +1,7 @@ -from groundlight import ExperimentalApi from unittest.mock import patch -from model import ImageQuery, Source, BinaryClassificationResult, Label + +from groundlight import ExperimentalApi +from model import BinaryClassificationResult, ImageQuery, Label, Source def test_wait_for_confident_result_returns_immediately_when_no_better_result_expected( From 479959e4dbcb87e3e0d45d1ecd0fdc014f019bbc Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Wed, 12 Mar 2025 12:02:25 -0700 Subject: [PATCH 14/15] trigger tests --- test/unit/test_wait.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/unit/test_wait.py b/test/unit/test_wait.py index 000d1b88..6862cc36 100644 --- a/test/unit/test_wait.py +++ b/test/unit/test_wait.py @@ -8,12 +8,12 @@ def test_wait_for_confident_result_returns_immediately_when_no_better_result_exp gl_experimental: ExperimentalApi, initial_iq: ImageQuery ): with patch.object(gl_experimental, "_wait_for_result") as mock_wait_for_result: - # Should not wait if the image query is done processing + # Shouldn't wait if the image query is done processing initial_iq.done_processing = True gl_experimental.wait_for_confident_result(initial_iq) mock_wait_for_result.assert_not_called() - # Should not wait if the result is from the edge + # Shouldn't wait if the result is from the edge initial_iq.done_processing = False initial_iq.result = BinaryClassificationResult(source=Source.EDGE, label=Label.YES) gl_experimental.wait_for_confident_result(initial_iq) From be8bf55d012d61f78d21f85e6be6a0c2e7794ce0 Mon Sep 17 00:00:00 2001 From: CoreyEWood Date: Mon, 31 Mar 2025 20:14:59 +0000 Subject: [PATCH 15/15] just putting this in to save it --- src/groundlight/client.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git 
a/src/groundlight/client.py b/src/groundlight/client.py index db88b283..fbfa4154 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -1072,6 +1072,14 @@ def wait_for_ml_result(self, image_query: Union[ImageQuery, str], timeout_sec: f :meth:`get_image_query` for checking result status without blocking :meth:`wait_for_confident_result` for waiting until a confident result is available """ + if isinstance(image_query, ImageQuery): + if image_query.result and image_query.result.source and image_query.result.source == Source.EDGE: + logger.debug( + "The image query is from the edge, so we are returning it immediately and not waiting for an ML " + "result." + ) + return image_query + # TODO I think this is lying - it doesn't raise a TimeoutError if there is no ML result within timeout_sec return self._wait_for_result(image_query, condition=iq_is_answered, timeout_sec=timeout_sec) def _wait_for_result(
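
The behavior these patches converge on (from PATCH 10/15 onward) is that `wait_for_confident_result` — and, in PATCH 15/15, `wait_for_ml_result` — returns the image query immediately instead of polling the cloud when it is already `done_processing` or was answered on the edge (`result.source == Source.EDGE`). A minimal caller-side sketch of that behavior, using only SDK names that appear in these diffs (`Groundlight`, `submit_image_query`, `wait_for_confident_result`, `Source`); the detector ID and image path are placeholders, not values from this PR::

    from groundlight import Groundlight
    from model import Source  # generated enum, imported the same way as in src/groundlight/client.py above

    gl = Groundlight()  # reads GROUNDLIGHT_API_TOKEN from the environment

    # Submit without blocking, then ask for a confident result separately.
    iq = gl.submit_image_query(detector="det_placeholder", image="frame.jpg", wait=0)

    # With these patches applied, this call returns iq immediately if it is already
    # done_processing or its result came from the edge; otherwise it polls the cloud
    # until the detector's confidence threshold is met or timeout_sec elapses.
    answer = gl.wait_for_confident_result(iq, timeout_sec=30.0)

    if answer.result is not None and answer.result.source == Source.EDGE:
        print(f"Edge answer: {answer.result.label} (confidence={answer.result.confidence})")
    elif answer.result is not None:
        print(f"Cloud answer: {answer.result.label} (confidence={answer.result.confidence})")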