diff --git a/generated/.openapi-generator/FILES b/generated/.openapi-generator/FILES
index 5b99bab8..10087712 100644
--- a/generated/.openapi-generator/FILES
+++ b/generated/.openapi-generator/FILES
@@ -50,6 +50,7 @@ docs/RuleRequest.md
 docs/SnoozeTimeUnitEnum.md
 docs/SourceEnum.md
 docs/StatusEnum.md
+docs/TextModeConfiguration.md
 docs/TextRecognitionResult.md
 docs/UserApi.md
 docs/VerbEnum.md
@@ -114,6 +115,7 @@ groundlight_openapi_client/model/rule_request.py
 groundlight_openapi_client/model/snooze_time_unit_enum.py
 groundlight_openapi_client/model/source_enum.py
 groundlight_openapi_client/model/status_enum.py
+groundlight_openapi_client/model/text_mode_configuration.py
 groundlight_openapi_client/model/text_recognition_result.py
 groundlight_openapi_client/model/verb_enum.py
 groundlight_openapi_client/model/webhook_action.py
diff --git a/generated/README.md b/generated/README.md
index accfe08f..62304432 100644
--- a/generated/README.md
+++ b/generated/README.md
@@ -179,6 +179,7 @@ Class | Method | HTTP request | Description
  - [SnoozeTimeUnitEnum](docs/SnoozeTimeUnitEnum.md)
  - [SourceEnum](docs/SourceEnum.md)
  - [StatusEnum](docs/StatusEnum.md)
+ - [TextModeConfiguration](docs/TextModeConfiguration.md)
  - [TextRecognitionResult](docs/TextRecognitionResult.md)
  - [VerbEnum](docs/VerbEnum.md)
  - [WebhookAction](docs/WebhookAction.md)
diff --git a/generated/docs/TextModeConfiguration.md b/generated/docs/TextModeConfiguration.md
new file mode 100644
index 00000000..01bcfdf1
--- /dev/null
+++ b/generated/docs/TextModeConfiguration.md
@@ -0,0 +1,12 @@
+# TextModeConfiguration
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**value_max_length** | **int** |  | [optional]
+**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/generated/docs/TextRecognitionResult.md b/generated/docs/TextRecognitionResult.md
index d50b80df..6258a200 100644
--- a/generated/docs/TextRecognitionResult.md
+++ b/generated/docs/TextRecognitionResult.md
@@ -4,7 +4,7 @@
 ## Properties
 Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
-**text** | **str** |  |
+**text** | **str, none_type** |  |
 **truncated** | **bool** |  |
 **confidence** | **float, none_type** |  | [optional]
 **source** | **str** |  | [optional]
diff --git a/generated/groundlight_openapi_client/model/detector_creation_input_request.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py
index 8ac281c7..ee4d485a 100644
--- a/generated/groundlight_openapi_client/model/detector_creation_input_request.py
+++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py
@@ -33,10 +33,12 @@ def lazy_import():
     from groundlight_openapi_client.model.count_mode_configuration import CountModeConfiguration
     from groundlight_openapi_client.model.mode_enum import ModeEnum
     from groundlight_openapi_client.model.multi_class_mode_configuration import MultiClassModeConfiguration
+    from groundlight_openapi_client.model.text_mode_configuration import TextModeConfiguration
 
     globals()["CountModeConfiguration"] = CountModeConfiguration
     globals()["ModeEnum"] = ModeEnum
     globals()["MultiClassModeConfiguration"] = MultiClassModeConfiguration
+    globals()["TextModeConfiguration"] = TextModeConfiguration
 
 
 class DetectorCreationInputRequest(ModelNormal):
diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py
index 362d870c..6a25d37f 100644
--- a/generated/groundlight_openapi_client/model/label_value_request.py
+++ b/generated/groundlight_openapi_client/model/label_value_request.py
@@ -62,9 +62,6 @@ class LabelValueRequest(ModelNormal):
     allowed_values = {}
 
     validations = {
-        ("label",): {
-            "min_length": 1,
-        },
         ("image_query_id",): {
             "min_length": 1,
         },
diff --git a/generated/groundlight_openapi_client/model/text_mode_configuration.py b/generated/groundlight_openapi_client/model/text_mode_configuration.py
new file mode 100644
index 00000000..ee4c4e73
--- /dev/null
+++ b/generated/groundlight_openapi_client/model/text_mode_configuration.py
@@ -0,0 +1,273 @@
+"""
+    Groundlight API
+
+    Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language.  # noqa: E501
+
+    The version of the OpenAPI document: 0.18.2
+    Contact: support@groundlight.ai
+    Generated by: https://openapi-generator.tech
+"""
+
+import re  # noqa: F401
+import sys  # noqa: F401
+
+from groundlight_openapi_client.model_utils import (  # noqa: F401
+    ApiTypeError,
+    ModelComposed,
+    ModelNormal,
+    ModelSimple,
+    cached_property,
+    change_keys_js_to_python,
+    convert_js_args_to_python_args,
+    date,
+    datetime,
+    file_type,
+    none_type,
+    validate_get_composed_info,
+    OpenApiModel,
+)
+from groundlight_openapi_client.exceptions import ApiAttributeError
+
+
+class TextModeConfiguration(ModelNormal):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+
+    Attributes:
+      allowed_values (dict): The key is the tuple path to the attribute
+          and the for var_name this is (var_name,). The value is a dict
+          with a capitalized key describing the allowed value and an allowed
+          value. These dicts store the allowed enum values.
+      attribute_map (dict): The key is attribute name
+          and the value is json key in definition.
+      discriminator_value_class_map (dict): A dict to go from the discriminator
+          variable value to the discriminator class name.
+      validations (dict): The key is the tuple path to the attribute
+          and the for var_name this is (var_name,). The value is a dict
+          that stores validations for max_length, min_length, max_items,
+          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
+          inclusive_minimum, and regex.
+      additional_properties_type (tuple): A tuple of classes accepted
+          as additional properties values.
+    """
+
+    allowed_values = {}
+
+    validations = {
+        ("value_max_length",): {
+            "inclusive_maximum": 250,
+            "inclusive_minimum": 1,
+        },
+    }
+
+    @cached_property
+    def additional_properties_type():
+        """
+        This must be a method because a model may have properties that are
+        of type self, this must run after the class is loaded
+        """
+        return (
+            bool,
+            date,
+            datetime,
+            dict,
+            float,
+            int,
+            list,
+            str,
+            none_type,
+        )  # noqa: E501
+
+    _nullable = False
+
+    @cached_property
+    def openapi_types():
+        """
+        This must be a method because a model may have properties that are
+        of type self, this must run after the class is loaded
+
+        Returns
+            openapi_types (dict): The key is attribute name
+                and the value is attribute type.
+        """
+        return {
+            "value_max_length": (int,),  # noqa: E501
+        }
+
+    @cached_property
+    def discriminator():
+        return None
+
+    attribute_map = {
+        "value_max_length": "value_max_length",  # noqa: E501
+    }
+
+    read_only_vars = {}
+
+    _composed_schemas = {}
+
+    @classmethod
+    @convert_js_args_to_python_args
+    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
+        """TextModeConfiguration - a model defined in OpenAPI
+
+        Keyword Args:
+            _check_type (bool): if True, values for parameters in openapi_types
+                                will be type checked and a TypeError will be
+                                raised if the wrong type is input.
+                                Defaults to True
+            _path_to_item (tuple/list): This is a list of keys or values to
+                           drill down to the model in received_data
+                           when deserializing a response
+            _spec_property_naming (bool): True if the variable names in the input data
+                                are serialized names, as specified in the OpenAPI document.
+                                False if the variable names in the input data
+                                are pythonic names, e.g. snake case (default)
+            _configuration (Configuration): the instance to use when
+                                deserializing a file_type parameter.
+                                If passed, type conversion is attempted
+                                If omitted no type conversion is done.
+            _visited_composed_classes (tuple): This stores a tuple of
+                                classes that we have traveled through so that
+                                if we see that class again we will not use its
+                                discriminator again.
+                                When traveling through a discriminator, the
+                                composed schema that is
+                                is traveled through is added to this set.
+                                For example if Animal has a discriminator
+                                petType and we pass in "Dog", and the class Dog
+                                allOf includes Animal, we move through Animal
+                                once using the discriminator, and pick Dog.
+                                Then in Dog, we will make an instance of the
+                                Animal class but this time we won't travel
+                                through its discriminator because we passed in
+                                _visited_composed_classes = (Animal,)
+            value_max_length (int): [optional]  # noqa: E501
+        """
+
+        _check_type = kwargs.pop("_check_type", True)
+        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
+        _path_to_item = kwargs.pop("_path_to_item", ())
+        _configuration = kwargs.pop("_configuration", None)
+        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
+
+        self = super(OpenApiModel, cls).__new__(cls)
+
+        if args:
+            raise ApiTypeError(
+                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
+                % (
+                    args,
+                    self.__class__.__name__,
+                ),
+                path_to_item=_path_to_item,
+                valid_classes=(self.__class__,),
+            )
+
+        self._data_store = {}
+        self._check_type = _check_type
+        self._spec_property_naming = _spec_property_naming
+        self._path_to_item = _path_to_item
+        self._configuration = _configuration
+        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
+
+        for var_name, var_value in kwargs.items():
+            if (
+                var_name not in self.attribute_map
+                and self._configuration is not None
+                and self._configuration.discard_unknown_keys
+                and self.additional_properties_type is None
+            ):
+                # discard variable.
+                continue
+            setattr(self, var_name, var_value)
+        return self
+
+    required_properties = set([
+        "_data_store",
+        "_check_type",
+        "_spec_property_naming",
+        "_path_to_item",
+        "_configuration",
+        "_visited_composed_classes",
+    ])
+
+    @convert_js_args_to_python_args
+    def __init__(self, *args, **kwargs):  # noqa: E501
+        """TextModeConfiguration - a model defined in OpenAPI
+
+        Keyword Args:
+            _check_type (bool): if True, values for parameters in openapi_types
+                                will be type checked and a TypeError will be
+                                raised if the wrong type is input.
+                                Defaults to True
+            _path_to_item (tuple/list): This is a list of keys or values to
+                           drill down to the model in received_data
+                           when deserializing a response
+            _spec_property_naming (bool): True if the variable names in the input data
+                                are serialized names, as specified in the OpenAPI document.
+                                False if the variable names in the input data
+                                are pythonic names, e.g. snake case (default)
+            _configuration (Configuration): the instance to use when
+                                deserializing a file_type parameter.
+                                If passed, type conversion is attempted
+                                If omitted no type conversion is done.
+            _visited_composed_classes (tuple): This stores a tuple of
+                                classes that we have traveled through so that
+                                if we see that class again we will not use its
+                                discriminator again.
+                                When traveling through a discriminator, the
+                                composed schema that is
+                                is traveled through is added to this set.
+                                For example if Animal has a discriminator
+                                petType and we pass in "Dog", and the class Dog
+                                allOf includes Animal, we move through Animal
+                                once using the discriminator, and pick Dog.
+                                Then in Dog, we will make an instance of the
+                                Animal class but this time we won't travel
+                                through its discriminator because we passed in
+                                _visited_composed_classes = (Animal,)
+            value_max_length (int): [optional]  # noqa: E501
+        """
+
+        _check_type = kwargs.pop("_check_type", True)
+        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
+        _path_to_item = kwargs.pop("_path_to_item", ())
+        _configuration = kwargs.pop("_configuration", None)
+        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
+
+        if args:
+            raise ApiTypeError(
+                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
+                % (
+                    args,
+                    self.__class__.__name__,
+                ),
+                path_to_item=_path_to_item,
+                valid_classes=(self.__class__,),
+            )
+
+        self._data_store = {}
+        self._check_type = _check_type
+        self._spec_property_naming = _spec_property_naming
+        self._path_to_item = _path_to_item
+        self._configuration = _configuration
+        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
+
+        for var_name, var_value in kwargs.items():
+            if (
+                var_name not in self.attribute_map
+                and self._configuration is not None
+                and self._configuration.discard_unknown_keys
+                and self.additional_properties_type is None
+            ):
+                # discard variable.
+                continue
+            setattr(self, var_name, var_value)
+            if var_name in self.read_only_vars:
+                raise ApiAttributeError(
+                    f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
+                    "class with read only attributes."
+                )
diff --git a/generated/groundlight_openapi_client/model/text_recognition_result.py b/generated/groundlight_openapi_client/model/text_recognition_result.py
index ca44ff65..cddca732 100644
--- a/generated/groundlight_openapi_client/model/text_recognition_result.py
+++ b/generated/groundlight_openapi_client/model/text_recognition_result.py
@@ -101,7 +101,10 @@ def openapi_types():
                 and the value is attribute type.
         """
         return {
-            "text": (str,),  # noqa: E501
+            "text": (
+                str,
+                none_type,
+            ),  # noqa: E501
             "truncated": (bool,),  # noqa: E501
             "confidence": (
                 float,
@@ -131,7 +134,7 @@ def _from_openapi_data(cls, text, truncated, *args, **kwargs):  # noqa: E501
         """TextRecognitionResult - a model defined in OpenAPI
 
         Args:
-            text (str):
+            text (str, none_type):
             truncated (bool):
 
         Keyword Args:
@@ -223,7 +226,7 @@ def __init__(self, text, truncated, *args, **kwargs):  # noqa: E501
         """TextRecognitionResult - a model defined in OpenAPI
 
         Args:
-            text (str):
+            text (str, none_type):
             truncated (bool):
 
         Keyword Args:
diff --git a/generated/groundlight_openapi_client/models/__init__.py b/generated/groundlight_openapi_client/models/__init__.py
index 3528a1aa..6a064903 100644
--- a/generated/groundlight_openapi_client/models/__init__.py
+++ b/generated/groundlight_openapi_client/models/__init__.py
@@ -51,6 +51,7 @@
 from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum
 from groundlight_openapi_client.model.source_enum import SourceEnum
 from groundlight_openapi_client.model.status_enum import StatusEnum
+from groundlight_openapi_client.model.text_mode_configuration import TextModeConfiguration
 from groundlight_openapi_client.model.text_recognition_result import TextRecognitionResult
 from groundlight_openapi_client.model.verb_enum import VerbEnum
 from groundlight_openapi_client.model.webhook_action import WebhookAction
diff --git a/generated/model.py b/generated/model.py
index 2a6062f9..e767d70b 100644
--- a/generated/model.py
+++ b/generated/model.py
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  public-api.yaml
-#   timestamp: 2025-02-08T00:46:39+00:00
+#   timestamp: 2025-02-22T00:13:41+00:00
 
 from __future__ import annotations
 
@@ -241,7 +241,7 @@ class MultiClassificationResult(BaseModel):
 class TextRecognitionResult(BaseModel):
     confidence: Optional[confloat(ge=0.0, le=1.0)] = None
     source: Optional[Source] = None
-    text: str
+    text: Optional[str] = Field(...)
     truncated: bool
 
 
@@ -256,7 +256,7 @@ class MultiClassModeConfiguration(BaseModel):
 
 
 class TextModeConfiguration(BaseModel):
-    pass
+    value_max_length: Optional[conint(ge=1, le=250)] = None
 
 
 class ChannelEnum(str, Enum):
@@ -411,7 +411,7 @@ class LabelValue(BaseModel):
 
 
 class LabelValueRequest(BaseModel):
-    label: constr(min_length=1)
+    label: str
     image_query_id: constr(min_length=1)
     rois: Optional[List[ROIRequest]] = None
 
diff --git a/generated/test/test_text_mode_configuration.py b/generated/test/test_text_mode_configuration.py
new file mode 100644
index 00000000..81ba287d
--- /dev/null
+++ b/generated/test/test_text_mode_configuration.py
@@ -0,0 +1,35 @@
+"""
+    Groundlight API
+
+    Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language.  # noqa: E501
+
+    The version of the OpenAPI document: 0.18.2
+    Contact: support@groundlight.ai
+    Generated by: https://openapi-generator.tech
+"""
+
+import sys
+import unittest
+
+import groundlight_openapi_client
+from groundlight_openapi_client.model.text_mode_configuration import TextModeConfiguration
+
+
+class TestTextModeConfiguration(unittest.TestCase):
+    """TextModeConfiguration unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def testTextModeConfiguration(self):
+        """Test TextModeConfiguration"""
+        # FIXME: construct object with mandatory attributes with example values
+        # model = TextModeConfiguration()  # noqa: E501
+        pass
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/pyproject.toml b/pyproject.toml
index 39f82da0..5f072ae2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,7 +9,7 @@ packages = [
     {include = "**/*.py", from = "src"},
 ]
 readme = "README.md"
-version = "0.22.0"
+version = "0.22.1"
 
 [tool.poetry.dependencies]
 # For certifi, use ">=" instead of "^" since it upgrades its "major version" every year, not really following semver
diff --git a/spec/public-api.yaml b/spec/public-api.yaml
index 78f51142..9476a17c 100644
--- a/spec/public-api.yaml
+++ b/spec/public-api.yaml
@@ -1077,7 +1077,6 @@ components:
         label:
           type: string
           writeOnly: true
-          minLength: 1
         image_query_id:
           type: string
           writeOnly: true
@@ -1505,6 +1504,7 @@ components:
           - ALGORITHM
         label:
          type: string
+          nullable: false
       required:
       - label
     TextRecognitionResult:
@@ -1526,6 +1526,7 @@ components:
           - ALGORITHM
         text:
           type: string
+          nullable: true
         truncated:
           type: boolean
       required:
@@ -1541,6 +1542,7 @@ components:
           nullable: false
         class_name:
           type: string
+          nullable: false
       required:
       - class_name
     MultiClassModeConfiguration:
@@ -1550,6 +1552,7 @@ components:
         class_names:
           type: array
           items:
             type: string
+          nullable: false
         num_classes:
           type: integer
           nullable: false
@@ -1557,6 +1560,12 @@ components:
       required:
       - class_names
     TextModeConfiguration:
       type: object
+      properties:
+        value_max_length:
+          type: integer
+          minimum: 1
+          maximum: 250
+          nullable: false
     ChannelEnum:
       type: string
       enum:
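
A minimal usage sketch of the regenerated models (illustration only, not part of the diff; it assumes the regenerated groundlight_openapi_client package is importable):

from groundlight_openapi_client.model.text_mode_configuration import TextModeConfiguration
from groundlight_openapi_client.model.text_recognition_result import TextRecognitionResult

# value_max_length is optional and validated against 1 <= value_max_length <= 250
text_mode = TextModeConfiguration(value_max_length=100)

# text is still a required argument, but with this change it may be None
ocr_result = TextRecognitionResult(text=None, truncated=False)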