From 6a9d625c070f22bc1efbcf2e7c3e2a2b977163c3 Mon Sep 17 00:00:00 2001 From: "Jeremy A. Prescott" Date: Wed, 22 Nov 2023 09:17:18 +0100 Subject: [PATCH] new specs with correct v3 (#1426) regenerate the specs with latest v3 config --- lightly/api/api_workflow_compute_worker.py | 24 +- .../swagger_client/__init__.py | 1 + .../swagger_client/api/__init__.py-e | 22 -- .../swagger_client/api/predictions_api.py | 90 +++---- .../swagger_client/api/scores_api.py | 44 ++-- .../swagger_client/models/__init__.py | 1 + .../swagger_client/models/__init__.py-e | 241 ------------------ .../models/selection_config_entry_strategy.py | 6 +- .../selection_config_v3_entry_strategy.py | 14 +- ...lection_config_v3_entry_strategy_all_of.py | 11 +- .../models/selection_strategy_type_v3.py | 46 ++++ .../test_api_workflow_compute_worker.py | 56 ++-- 12 files changed, 177 insertions(+), 379 deletions(-) delete mode 100644 lightly/openapi_generated/swagger_client/api/__init__.py-e delete mode 100644 lightly/openapi_generated/swagger_client/models/__init__.py-e create mode 100644 lightly/openapi_generated/swagger_client/models/selection_strategy_type_v3.py diff --git a/lightly/api/api_workflow_compute_worker.py b/lightly/api/api_workflow_compute_worker.py index d05e71892..f98244256 100644 --- a/lightly/api/api_workflow_compute_worker.py +++ b/lightly/api/api_workflow_compute_worker.py @@ -22,10 +22,10 @@ DockerWorkerConfigV3Lightly, DockerWorkerRegistryEntryData, DockerWorkerType, - SelectionConfig, - SelectionConfigEntry, - SelectionConfigEntryInput, - SelectionConfigEntryStrategy, + SelectionConfigV3, + SelectionConfigV3Entry, + SelectionConfigV3EntryInput, + SelectionConfigV3EntryStrategy, TagData, ) from lightly.openapi_generated.swagger_client.rest import ApiException @@ -175,7 +175,7 @@ def create_compute_worker_config( self, worker_config: Optional[Dict[str, Any]] = None, lightly_config: Optional[Dict[str, Any]] = None, - selection_config: Optional[Union[Dict[str, Any], SelectionConfig]] = None, + selection_config: Optional[Union[Dict[str, Any], SelectionConfigV3]] = None, ) -> str: """Creates a new configuration for a Lightly Worker run. 
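NOTE: A minimal usage sketch for the new `SelectionConfigV3`-typed parameter above, assuming the documented selection-config dict shape (`n_samples`, `input.type`, `strategy.type`); the token and dataset id are hypothetical placeholders:

    from lightly.api import ApiWorkflowClient

    # Hypothetical credentials, for illustration only.
    client = ApiWorkflowClient(token="MY_TOKEN", dataset_id="MY_DATASET_ID")

    # selection_config may be passed either as a plain dict (see
    # selection_config_from_dict below) or as a SelectionConfigV3 instance.
    config_id = client.create_compute_worker_config(
        selection_config={
            "n_samples": 50,
            "strategies": [
                {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}},
            ],
        },
    )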
@@ -269,7 +269,7 @@ def schedule_compute_worker_run( self, worker_config: Optional[Dict[str, Any]] = None, lightly_config: Optional[Dict[str, Any]] = None, - selection_config: Optional[Union[Dict[str, Any], SelectionConfig]] = None, + selection_config: Optional[Union[Dict[str, Any], SelectionConfigV3]] = None, priority: str = DockerRunScheduledPriority.MID, runs_on: Optional[List[str]] = None, ) -> str: @@ -634,17 +634,17 @@ def get_compute_worker_run_tags(self, run_id: str) -> List[TagData]: return tags_in_dataset -def selection_config_from_dict(cfg: Dict[str, Any]) -> SelectionConfig: - """Recursively converts selection config from dict to a SelectionConfig instance.""" +def selection_config_from_dict(cfg: Dict[str, Any]) -> SelectionConfigV3: + """Recursively converts selection config from dict to a SelectionConfigV3 instance.""" strategies = [] for entry in cfg.get("strategies", []): new_entry = copy.deepcopy(entry) - new_entry["input"] = SelectionConfigEntryInput(**entry["input"]) - new_entry["strategy"] = SelectionConfigEntryStrategy(**entry["strategy"]) - strategies.append(SelectionConfigEntry(**new_entry)) + new_entry["input"] = SelectionConfigV3EntryInput(**entry["input"]) + new_entry["strategy"] = SelectionConfigV3EntryStrategy(**entry["strategy"]) + strategies.append(SelectionConfigV3Entry(**new_entry)) new_cfg = copy.deepcopy(cfg) new_cfg["strategies"] = strategies - return SelectionConfig(**new_cfg) + return SelectionConfigV3(**new_cfg) _T = TypeVar("_T") diff --git a/lightly/openapi_generated/swagger_client/__init__.py b/lightly/openapi_generated/swagger_client/__init__.py index fa528cff6..8d3328d74 100644 --- a/lightly/openapi_generated/swagger_client/__init__.py +++ b/lightly/openapi_generated/swagger_client/__init__.py @@ -233,6 +233,7 @@ from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType +from lightly.openapi_generated.swagger_client.models.selection_strategy_type_v3 import SelectionStrategyTypeV3 from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest diff --git a/lightly/openapi_generated/swagger_client/api/__init__.py-e b/lightly/openapi_generated/swagger_client/api/__init__.py-e deleted file mode 100644 index 341e863fc..000000000 --- a/lightly/openapi_generated/swagger_client/api/__init__.py-e +++ /dev/null @@ -1,22 +0,0 @@ -# flake8: noqa - -# import apis into api package -from lightly.openapi_generated.swagger_client.api.collaboration_api import CollaborationApi -from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi -from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi -from lightly.openapi_generated.swagger_client.api.docker_api import DockerApi -from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi -from lightly.openapi_generated.swagger_client.api.embeddings2d_api import Embeddings2dApi -from lightly.openapi_generated.swagger_client.api.jobs_api import 
JobsApi -from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi -from lightly.openapi_generated.swagger_client.api.meta_data_configurations_api import MetaDataConfigurationsApi -from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi -from lightly.openapi_generated.swagger_client.api.profiles_api import ProfilesApi -from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi -from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi -from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi -from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi -from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi -from lightly.openapi_generated.swagger_client.api.teams_api import TeamsApi -from lightly.openapi_generated.swagger_client.api.versioning_api import VersioningApi - diff --git a/lightly/openapi_generated/swagger_client/api/predictions_api.py b/lightly/openapi_generated/swagger_client/api/predictions_api.py index 30c2ba163..c188f45fc 100644 --- a/lightly/openapi_generated/swagger_client/api/predictions_api.py +++ b/lightly/openapi_generated/swagger_client/api/predictions_api.py @@ -50,24 +50,24 @@ def __init__(self, api_client=None): self.api_client = api_client @validate_arguments - def create_or_update_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_singleton : conlist(PredictionSingleton), **kwargs) -> CreateEntityResponse: # noqa: E501 + def create_or_update_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_singleton : conlist(PredictionSingleton), prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501 """create_or_update_prediction_by_sample_id # noqa: E501 Create/Update all the prediction singletons per taskName for a sampleId in the order/index of them being discovered # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_prediction_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, async_req=True) + >>> thread = api.create_or_update_prediction_by_sample_id(dataset_id, sample_id, prediction_singleton, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str :param sample_id: ObjectId of the sample (required) :type sample_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param prediction_singleton: (required) :type prediction_singleton: List[PredictionSingleton] + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -82,27 +82,27 @@ def create_or_update_prediction_by_sample_id(self, dataset_id : Annotated[constr kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: raise ValueError("Error! Please call the create_or_update_prediction_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, **kwargs) # noqa: E501 + return self.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_singleton, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def create_or_update_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_singleton : conlist(PredictionSingleton), **kwargs) -> ApiResponse: # noqa: E501 + def create_or_update_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_singleton : conlist(PredictionSingleton), prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """create_or_update_prediction_by_sample_id # noqa: E501 Create/Update all the prediction singletons per taskName for a sampleId in the order/index of them being discovered # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, async_req=True) + >>> thread = api.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_singleton, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str :param sample_id: ObjectId of the sample (required) :type sample_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param prediction_singleton: (required) :type prediction_singleton: List[PredictionSingleton] + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -133,8 +133,8 @@ def create_or_update_prediction_by_sample_id_with_http_info(self, dataset_id : A _all_params = [ 'dataset_id', 'sample_id', - 'prediction_uuid_timestamp', - 'prediction_singleton' + 'prediction_singleton', + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -227,22 +227,22 @@ def create_or_update_prediction_by_sample_id_with_http_info(self, dataset_id : A _request_auth=_params.get('_request_auth')) @validate_arguments - def create_or_update_prediction_task_schema_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_task_schema : PredictionTaskSchema, **kwargs) -> CreateEntityResponse: # noqa: E501 + def create_or_update_prediction_task_schema_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_task_schema : PredictionTaskSchema, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501 """create_or_update_prediction_task_schema_by_dataset_id # noqa: E501 Creates/updates a prediction task schema with the task name # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id(dataset_id, prediction_uuid_timestamp, prediction_task_schema, async_req=True) + >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id(dataset_id, prediction_task_schema, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param prediction_task_schema: (required) :type prediction_task_schema: PredictionTaskSchema + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -257,25 +257,25 @@ def create_or_update_prediction_task_schema_by_dataset_id(self, dataset_id : Ann kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: raise ValueError("Error! Please call the create_or_update_prediction_task_schema_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, prediction_task_schema, **kwargs) # noqa: E501 + return self.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_task_schema, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")], prediction_task_schema : PredictionTaskSchema, **kwargs) -> ApiResponse: # noqa: E501 + def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_task_schema : PredictionTaskSchema, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """create_or_update_prediction_task_schema_by_dataset_id # noqa: E501 Creates/updates a prediction task schema with the task name # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, prediction_task_schema, async_req=True) + >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_task_schema, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param prediction_task_schema: (required) :type prediction_task_schema: PredictionTaskSchema + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -305,8 +305,8 @@ def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, d _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp', - 'prediction_task_schema' + 'prediction_task_schema', + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -395,22 +395,22 @@ def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, d _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> PredictionTaskSchema: # noqa: E501 + def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchema: # noqa: E501 """get_prediction_task_schema_by_task_name # noqa: E501 Get a prediction task schemas named taskName for a datasetId # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, task_name, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param task_name: The prediction task name for which one wants to list the predictions (required) :type task_name: str + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -425,25 +425,25 @@ def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr( kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: raise ValueError("Error! 
Please call the get_prediction_task_schema_by_task_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 + return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, task_name, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> ApiResponse: # noqa: E501 + def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_prediction_task_schema_by_task_name # noqa: E501 Get a prediction task schemas named taskName for a datasetId # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, task_name, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param task_name: The prediction task name for which one wants to list the predictions (required) :type task_name: str + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -473,8 +473,8 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp', - 'task_name' + 'task_name', + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -557,7 +557,7 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchemas: # noqa: E501 + def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchemas: # noqa: E501 """get_prediction_task_schemas_by_dataset_id # noqa: E501 Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 @@ -569,7 +569,7 @@ def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[const :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -588,7 +588,7 @@ def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[const return self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_prediction_task_schemas_by_dataset_id # noqa: E501 Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 @@ -600,7 +600,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -711,7 +711,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : _request_auth=_params.get('_request_auth')) @validate_arguments - def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 + def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 """get_predictions_by_dataset_id # noqa: E501 Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 @@ -723,7 +723,7 @@ def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=Tru :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name :type task_name: str @@ -744,7 +744,7 @@ def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=Tru return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 @validate_arguments - def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_predictions_by_dataset_id # noqa: E501 Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 @@ -756,7 +756,7 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name :type task_name: str @@ -876,7 +876,7 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co _request_auth=_params.get('_request_auth')) @validate_arguments - def get_predictions_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> List[PredictionSingleton]: # noqa: E501 + def get_predictions_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> List[PredictionSingleton]: # noqa: E501 """get_predictions_by_sample_id # noqa: E501 Get all prediction singletons of all tasks for a specific sample of a dataset # noqa: E501 @@ -890,7 +890,7 @@ def get_predictions_by_sample_id(self, dataset_id : Annotated[constr(strict=True :type dataset_id: str :param sample_id: ObjectId of the sample (required) :type sample_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -909,7 +909,7 @@ def get_predictions_by_sample_id(self, dataset_id : Annotated[constr(strict=True return self.get_predictions_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_predictions_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> ApiResponse: # noqa: E501 + def get_predictions_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_predictions_by_sample_id # noqa: E501 Get all prediction singletons of all tasks for a specific sample of a dataset # noqa: E501 @@ -923,7 +923,7 @@ def get_predictions_by_sample_id_with_http_info(self, dataset_id : Annotated[con :type dataset_id: str :param sample_id: ObjectId of the sample (required) :type sample_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional diff --git a/lightly/openapi_generated/swagger_client/api/scores_api.py b/lightly/openapi_generated/swagger_client/api/scores_api.py index 4b07d9206..fadd3efcc 100644 --- a/lightly/openapi_generated/swagger_client/api/scores_api.py +++ b/lightly/openapi_generated/swagger_client/api/scores_api.py @@ -22,7 +22,7 @@ from pydantic import Field, conint, constr, validator -from typing import List +from typing import List, Optional from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData @@ -220,24 +220,24 @@ def create_or_update_active_learning_score_by_tag_id_with_http_info(self, datase _request_auth=_params.get('_request_auth')) @validate_arguments - def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501 + def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501 """create_or_update_active_learning_v2_score_by_dataset_id # noqa: E501 Create or update active learning score object for a dataset, taskName, predictionUUIDTimestamp # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_active_learning_v2_score_by_dataset_id(dataset_id, prediction_uuid_timestamp, task_name, active_learning_score_create_request, async_req=True) + >>> thread = api.create_or_update_active_learning_v2_score_by_dataset_id(dataset_id, task_name, active_learning_score_create_request, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. 
One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param task_name: The prediction task name for which one wants to list the predictions (required) :type task_name: str :param active_learning_score_create_request: (required) :type active_learning_score_create_request: ActiveLearningScoreCreateRequest + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -252,27 +252,27 @@ def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : A kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: raise ValueError("Error! Please call the create_or_update_active_learning_v2_score_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, active_learning_score_create_request, **kwargs) # noqa: E501 + return self.create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(dataset_id, task_name, active_learning_score_create_request, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, **kwargs) -> ApiResponse: # noqa: E501 + def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, **kwargs) -> ApiResponse: # noqa: E501 """create_or_update_active_learning_v2_score_by_dataset_id # noqa: E501 Create or update active learning score object for a dataset, taskName, predictionUUIDTimestamp # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, active_learning_score_create_request, async_req=True) + >>> thread = api.create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(dataset_id, task_name, active_learning_score_create_request, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) - :type prediction_uuid_timestamp: int :param task_name: The prediction task name for which one wants to list the predictions (required) :type task_name: str :param active_learning_score_create_request: (required) :type active_learning_score_create_request: ActiveLearningScoreCreateRequest + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :type prediction_uuid_timestamp: int :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -302,9 +302,9 @@ def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp', 'task_name', - 'active_learning_score_create_request' + 'active_learning_score_create_request', + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -338,18 +338,18 @@ def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, # process the query parameters _query_params = [] - if _params.get('prediction_uuid_timestamp') is not None: # noqa: E501 - _query_params.append(( - 'predictionUUIDTimestamp', - _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp'] - )) - if _params.get('task_name') is not None: # noqa: E501 _query_params.append(( 'taskName', _params['task_name'].value if hasattr(_params['task_name'], 'value') else _params['task_name'] )) + if _params.get('prediction_uuid_timestamp') is not None: # noqa: E501 + _query_params.append(( + 'predictionUUIDTimestamp', + _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp'] + )) + # process the header parameters _header_params = dict(_params.get('_headers', {})) # process the form parameters @@ -865,7 +865,7 @@ def get_active_learning_v2_score_by_dataset_and_score_id_with_http_info(self, da _request_auth=_params.get('_request_auth')) @validate_arguments - def get_active_learning_v2_scores_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> List[ActiveLearningScoreTypesV2Data]: # noqa: E501 + def get_active_learning_v2_scores_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> List[ActiveLearningScoreTypesV2Data]: # noqa: E501 """get_active_learning_v2_scores_by_dataset_id # noqa: E501 Get all AL score types by datasetId and predictionUUIDTimestamp # noqa: E501 @@ -877,7 +877,7 @@ def get_active_learning_v2_scores_by_dataset_id(self, dataset_id : Annotated[con :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. 
+        :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g. one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
         :type prediction_uuid_timestamp: int
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
@@ -896,7 +896,7 @@ def get_active_learning_v2_scores_by_dataset_id(self, dataset_id : Annotated[con
         return self.get_active_learning_v2_scores_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs)  # noqa: E501
     @validate_arguments
-    def get_active_learning_v2_scores_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> ApiResponse:  # noqa: E501
+    def get_active_learning_v2_scores_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g. one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse:  # noqa: E501
         """get_active_learning_v2_scores_by_dataset_id  # noqa: E501
         Get all AL score types by datasetId and predictionUUIDTimestamp  # noqa: E501
@@ -908,7 +908,7 @@ def get_active_learning_v2_scores_by_dataset_id_with_http_info(self, dataset_id
         :param dataset_id: ObjectId of the dataset (required)
         :type dataset_id: str
-        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
+        :param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g. one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
         :type prediction_uuid_timestamp: int
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
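Note: a minimal usage sketch of the regenerated scores endpoints. It assumes an authenticated ScoresApi instance named api, a placeholder dataset_id, and illustrative ActiveLearningScoreCreateRequest fields (the request field values are assumptions, not taken from this patch). The deprecated predictionUUIDTimestamp argument now comes last and can simply be omitted:

    >>> request = ActiveLearningScoreCreateRequest(score_type="uncertainty_margin", scores=[0.9, 0.4])
    >>> api.create_or_update_active_learning_v2_score_by_dataset_id(dataset_id, "my-classification-task", request)
    >>> scores = api.get_active_learning_v2_scores_by_dataset_id(dataset_id)  # no timestamp argument needed anymore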
diff --git a/lightly/openapi_generated/swagger_client/models/__init__.py b/lightly/openapi_generated/swagger_client/models/__init__.py
index e7a4e3b4d..fd20c0edc 100644
--- a/lightly/openapi_generated/swagger_client/models/__init__.py
+++ b/lightly/openapi_generated/swagger_client/models/__init__.py
@@ -200,6 +200,7 @@
 from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType
 from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation
 from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType
+from lightly.openapi_generated.swagger_client.models.selection_strategy_type_v3 import SelectionStrategyTypeV3
 from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData
 from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest
 from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest
diff --git a/lightly/openapi_generated/swagger_client/models/__init__.py-e b/lightly/openapi_generated/swagger_client/models/__init__.py-e
deleted file mode 100644
index 5c06aec0e..000000000
--- a/lightly/openapi_generated/swagger_client/models/__init__.py-e
+++ /dev/null
@@ -1,241 +0,0 @@
-# coding: utf-8
-
-# flake8: noqa
-"""
-    Lightly API
-
-    Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai  # noqa: E501
-
-    The version of the OpenAPI document: 1.0.0
-    Contact: support@lightly.ai
-    Generated by OpenAPI Generator (https://openapi-generator.tech)
-
-    Do not edit the class manually.
-""" - - -# import models into model package -from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest -from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData -from lightly.openapi_generated.swagger_client.models.active_learning_score_types_v2_data import ActiveLearningScoreTypesV2Data -from lightly.openapi_generated.swagger_client.models.active_learning_score_v2_data import ActiveLearningScoreV2Data -from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode -from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse -from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData -from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData -from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry -from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest -from lightly.openapi_generated.swagger_client.models.configuration_value_data_type import ConfigurationValueDataType -from lightly.openapi_generated.swagger_client.models.create_cf_bucket_activity_request import CreateCFBucketActivityRequest -from lightly.openapi_generated.swagger_client.models.create_docker_worker_registry_entry_request import CreateDockerWorkerRegistryEntryRequest -from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse -from lightly.openapi_generated.swagger_client.models.create_sample_with_write_urls_response import CreateSampleWithWriteUrlsResponse -from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest -from lightly.openapi_generated.swagger_client.models.creator import Creator -from lightly.openapi_generated.swagger_client.models.crop_data import CropData -from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest -from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator -from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData -from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched -from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData -from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType -from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest -from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig -from lightly.openapi_generated.swagger_client.models.datasource_config_azure import DatasourceConfigAzure -from lightly.openapi_generated.swagger_client.models.datasource_config_azure_all_of import DatasourceConfigAzureAllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase -from lightly.openapi_generated.swagger_client.models.datasource_config_base_full_path import DatasourceConfigBaseFullPath -from lightly.openapi_generated.swagger_client.models.datasource_config_gcs import DatasourceConfigGCS -from lightly.openapi_generated.swagger_client.models.datasource_config_gcs_all_of import DatasourceConfigGCSAllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_lightly 
import DatasourceConfigLIGHTLY -from lightly.openapi_generated.swagger_client.models.datasource_config_local import DatasourceConfigLOCAL -from lightly.openapi_generated.swagger_client.models.datasource_config_local_all_of import DatasourceConfigLOCALAllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_obs import DatasourceConfigOBS -from lightly.openapi_generated.swagger_client.models.datasource_config_obs_all_of import DatasourceConfigOBSAllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_s3 import DatasourceConfigS3 -from lightly.openapi_generated.swagger_client.models.datasource_config_s3_all_of import DatasourceConfigS3AllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access import DatasourceConfigS3DelegatedAccess -from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access_all_of import DatasourceConfigS3DelegatedAccessAllOf -from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data import DatasourceConfigVerifyData -from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data_errors import DatasourceConfigVerifyDataErrors -from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest -from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse -from lightly.openapi_generated.swagger_client.models.datasource_purpose import DatasourcePurpose -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row import DatasourceRawSamplesDataRow -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data import DatasourceRawSamplesMetadataData -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data_row import DatasourceRawSamplesMetadataDataRow -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData -from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data_row import DatasourceRawSamplesPredictionsDataRow -from lightly.openapi_generated.swagger_client.models.dimensionality_reduction_method import DimensionalityReductionMethod -from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation -from lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest -from lightly.openapi_generated.swagger_client.models.docker_run_artifact_created_data import DockerRunArtifactCreatedData -from lightly.openapi_generated.swagger_client.models.docker_run_artifact_data import DockerRunArtifactData -from lightly.openapi_generated.swagger_client.models.docker_run_artifact_storage_location import DockerRunArtifactStorageLocation -from lightly.openapi_generated.swagger_client.models.docker_run_artifact_type import DockerRunArtifactType -from lightly.openapi_generated.swagger_client.models.docker_run_create_request import DockerRunCreateRequest -from lightly.openapi_generated.swagger_client.models.docker_run_data import DockerRunData -from lightly.openapi_generated.swagger_client.models.docker_run_log_data import DockerRunLogData -from 
lightly.openapi_generated.swagger_client.models.docker_run_log_entry_data import DockerRunLogEntryData -from lightly.openapi_generated.swagger_client.models.docker_run_log_level import DockerRunLogLevel -from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_create_request import DockerRunScheduledCreateRequest -from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_data import DockerRunScheduledData -from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_priority import DockerRunScheduledPriority -from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_state import DockerRunScheduledState -from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_update_request import DockerRunScheduledUpdateRequest -from lightly.openapi_generated.swagger_client.models.docker_run_state import DockerRunState -from lightly.openapi_generated.swagger_client.models.docker_run_update_request import DockerRunUpdateRequest -from lightly.openapi_generated.swagger_client.models.docker_task_description import DockerTaskDescription -from lightly.openapi_generated.swagger_client.models.docker_user_stats import DockerUserStats -from lightly.openapi_generated.swagger_client.models.docker_worker_config import DockerWorkerConfig -from lightly.openapi_generated.swagger_client.models.docker_worker_config_create_request import DockerWorkerConfigCreateRequest -from lightly.openapi_generated.swagger_client.models.docker_worker_config_data import DockerWorkerConfigData -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2 import DockerWorkerConfigV2 -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_create_request import DockerWorkerConfigV2CreateRequest -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_data import DockerWorkerConfigV2Data -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker import DockerWorkerConfigV2Docker -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_object_level import DockerWorkerConfigV2DockerObjectLevel -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_stopping_condition import DockerWorkerConfigV2DockerStoppingCondition -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly import DockerWorkerConfigV2Lightly -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_collate import DockerWorkerConfigV2LightlyCollate -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_model import DockerWorkerConfigV2LightlyModel -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_trainer import DockerWorkerConfigV2LightlyTrainer -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3 import DockerWorkerConfigV3 -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_create_request import DockerWorkerConfigV3CreateRequest -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_data import DockerWorkerConfigV3Data -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker import DockerWorkerConfigV3Docker -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_corruptness_check import DockerWorkerConfigV3DockerCorruptnessCheck -from 
lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_datasource import DockerWorkerConfigV3DockerDatasource -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_training import DockerWorkerConfigV3DockerTraining -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly import DockerWorkerConfigV3Lightly -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_checkpoint_callback import DockerWorkerConfigV3LightlyCheckpointCallback -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_collate import DockerWorkerConfigV3LightlyCollate -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_criterion import DockerWorkerConfigV3LightlyCriterion -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_loader import DockerWorkerConfigV3LightlyLoader -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_model import DockerWorkerConfigV3LightlyModel -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_optimizer import DockerWorkerConfigV3LightlyOptimizer -from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_trainer import DockerWorkerConfigV3LightlyTrainer -from lightly.openapi_generated.swagger_client.models.docker_worker_registry_entry_data import DockerWorkerRegistryEntryData -from lightly.openapi_generated.swagger_client.models.docker_worker_state import DockerWorkerState -from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType -from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest -from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData -from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData -from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat -from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat -from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl -from lightly.openapi_generated.swagger_client.models.image_type import ImageType -from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest -from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType -from lightly.openapi_generated.swagger_client.models.job_state import JobState -from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData -from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult -from lightly.openapi_generated.swagger_client.models.job_status_meta import JobStatusMeta -from lightly.openapi_generated.swagger_client.models.job_status_upload_method import JobStatusUploadMethod -from lightly.openapi_generated.swagger_client.models.jobs_data import JobsData -from lightly.openapi_generated.swagger_client.models.label_box_data_row import LabelBoxDataRow -from lightly.openapi_generated.swagger_client.models.label_box_v4_data_row import LabelBoxV4DataRow -from lightly.openapi_generated.swagger_client.models.label_studio_task import LabelStudioTask -from lightly.openapi_generated.swagger_client.models.label_studio_task_data import LabelStudioTaskData -from 
lightly.openapi_generated.swagger_client.models.lightly_docker_selection_method import LightlyDockerSelectionMethod -from lightly.openapi_generated.swagger_client.models.lightly_model_v2 import LightlyModelV2 -from lightly.openapi_generated.swagger_client.models.lightly_model_v3 import LightlyModelV3 -from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v2 import LightlyTrainerPrecisionV2 -from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v3 import LightlyTrainerPrecisionV3 -from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton -from lightly.openapi_generated.swagger_client.models.prediction_singleton_base import PredictionSingletonBase -from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification import PredictionSingletonClassification -from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification_all_of import PredictionSingletonClassificationAllOf -from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation import PredictionSingletonInstanceSegmentation -from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation_all_of import PredictionSingletonInstanceSegmentationAllOf -from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection import PredictionSingletonKeypointDetection -from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection_all_of import PredictionSingletonKeypointDetectionAllOf -from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection import PredictionSingletonObjectDetection -from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection_all_of import PredictionSingletonObjectDetectionAllOf -from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation import PredictionSingletonSemanticSegmentation -from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation_all_of import PredictionSingletonSemanticSegmentationAllOf -from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_base import PredictionTaskSchemaBase -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category import PredictionTaskSchemaCategory -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints import PredictionTaskSchemaCategoryKeypoints -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints_all_of import PredictionTaskSchemaCategoryKeypointsAllOf -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint import PredictionTaskSchemaKeypoint -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint_all_of import PredictionTaskSchemaKeypointAllOf -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple import PredictionTaskSchemaSimple -from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple_all_of import PredictionTaskSchemaSimpleAllOf -from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas -from lightly.openapi_generated.swagger_client.models.profile_basic_data import ProfileBasicData -from 
lightly.openapi_generated.swagger_client.models.profile_me_data import ProfileMeData -from lightly.openapi_generated.swagger_client.models.profile_me_data_settings import ProfileMeDataSettings -from lightly.openapi_generated.swagger_client.models.questionnaire_data import QuestionnaireData -from lightly.openapi_generated.swagger_client.models.s3_region import S3Region -from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask -from lightly.openapi_generated.swagger_client.models.sama_task_data import SamaTaskData -from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest -from lightly.openapi_generated.swagger_client.models.sample_data import SampleData -from lightly.openapi_generated.swagger_client.models.sample_data_modes import SampleDataModes -from lightly.openapi_generated.swagger_client.models.sample_meta_data import SampleMetaData -from lightly.openapi_generated.swagger_client.models.sample_partial_mode import SamplePartialMode -from lightly.openapi_generated.swagger_client.models.sample_sort_by import SampleSortBy -from lightly.openapi_generated.swagger_client.models.sample_type import SampleType -from lightly.openapi_generated.swagger_client.models.sample_update_request import SampleUpdateRequest -from lightly.openapi_generated.swagger_client.models.sample_write_urls import SampleWriteUrls -from lightly.openapi_generated.swagger_client.models.sampling_config import SamplingConfig -from lightly.openapi_generated.swagger_client.models.sampling_config_stopping_condition import SamplingConfigStoppingCondition -from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest -from lightly.openapi_generated.swagger_client.models.sampling_method import SamplingMethod -from lightly.openapi_generated.swagger_client.models.sector import Sector -from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig -from lightly.openapi_generated.swagger_client.models.selection_config_all_of import SelectionConfigAllOf -from lightly.openapi_generated.swagger_client.models.selection_config_base import SelectionConfigBase -from lightly.openapi_generated.swagger_client.models.selection_config_entry import SelectionConfigEntry -from lightly.openapi_generated.swagger_client.models.selection_config_entry_input import SelectionConfigEntryInput -from lightly.openapi_generated.swagger_client.models.selection_config_entry_strategy import SelectionConfigEntryStrategy -from lightly.openapi_generated.swagger_client.models.selection_config_v3 import SelectionConfigV3 -from lightly.openapi_generated.swagger_client.models.selection_config_v3_all_of import SelectionConfigV3AllOf -from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry import SelectionConfigV3Entry -from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry_input import SelectionConfigV3EntryInput -from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry_strategy import SelectionConfigV3EntryStrategy -from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry_strategy_all_of import SelectionConfigV3EntryStrategyAllOf -from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry_strategy_all_of_target_range import SelectionConfigV3EntryStrategyAllOfTargetRange -from lightly.openapi_generated.swagger_client.models.selection_input_predictions_name import SelectionInputPredictionsName -from 
lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType -from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation -from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType -from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData -from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest -from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest -from lightly.openapi_generated.swagger_client.models.shared_access_config_data import SharedAccessConfigData -from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType -from lightly.openapi_generated.swagger_client.models.tag_active_learning_scores_data import TagActiveLearningScoresData -from lightly.openapi_generated.swagger_client.models.tag_arithmetics_operation import TagArithmeticsOperation -from lightly.openapi_generated.swagger_client.models.tag_arithmetics_request import TagArithmeticsRequest -from lightly.openapi_generated.swagger_client.models.tag_arithmetics_response import TagArithmeticsResponse -from lightly.openapi_generated.swagger_client.models.tag_bit_mask_response import TagBitMaskResponse -from lightly.openapi_generated.swagger_client.models.tag_change_data import TagChangeData -from lightly.openapi_generated.swagger_client.models.tag_change_data_arithmetics import TagChangeDataArithmetics -from lightly.openapi_generated.swagger_client.models.tag_change_data_initial import TagChangeDataInitial -from lightly.openapi_generated.swagger_client.models.tag_change_data_metadata import TagChangeDataMetadata -from lightly.openapi_generated.swagger_client.models.tag_change_data_operation_method import TagChangeDataOperationMethod -from lightly.openapi_generated.swagger_client.models.tag_change_data_rename import TagChangeDataRename -from lightly.openapi_generated.swagger_client.models.tag_change_data_sampler import TagChangeDataSampler -from lightly.openapi_generated.swagger_client.models.tag_change_data_samples import TagChangeDataSamples -from lightly.openapi_generated.swagger_client.models.tag_change_data_scatterplot import TagChangeDataScatterplot -from lightly.openapi_generated.swagger_client.models.tag_change_data_upsize import TagChangeDataUpsize -from lightly.openapi_generated.swagger_client.models.tag_change_entry import TagChangeEntry -from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest -from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator -from lightly.openapi_generated.swagger_client.models.tag_data import TagData -from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest -from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest -from lightly.openapi_generated.swagger_client.models.task_type import TaskType -from lightly.openapi_generated.swagger_client.models.team_basic_data import TeamBasicData -from lightly.openapi_generated.swagger_client.models.team_data import TeamData -from lightly.openapi_generated.swagger_client.models.team_role import TeamRole -from lightly.openapi_generated.swagger_client.models.trigger2d_embedding_job_request import 
Trigger2dEmbeddingJobRequest -from lightly.openapi_generated.swagger_client.models.update_docker_worker_registry_entry_request import UpdateDockerWorkerRegistryEntryRequest -from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest -from lightly.openapi_generated.swagger_client.models.user_type import UserType -from lightly.openapi_generated.swagger_client.models.video_frame_data import VideoFrameData -from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData diff --git a/lightly/openapi_generated/swagger_client/models/selection_config_entry_strategy.py b/lightly/openapi_generated/swagger_client/models/selection_config_entry_strategy.py index e614c2978..67bf84527 100644 --- a/lightly/openapi_generated/swagger_client/models/selection_config_entry_strategy.py +++ b/lightly/openapi_generated/swagger_client/models/selection_config_entry_strategy.py @@ -20,7 +20,7 @@ from typing import Any, Dict, Optional, Union -from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, confloat, conint +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType @@ -29,12 +29,11 @@ class SelectionConfigEntryStrategy(BaseModel): SelectionConfigEntryStrategy """ type: SelectionStrategyType = Field(...) - strength: Optional[Union[confloat(le=1000000000, ge=-1000000000, strict=True), conint(le=1000000000, ge=-1000000000, strict=True)]] = Field(None, description="The relative strength of this strategy compared to other strategies. The default value is 1.0, which is set in the worker for backwards compatibility. The minimum and maximum values of +-10^9 are used to prevent numerical issues. 
") stopping_condition_minimum_distance: Optional[Union[StrictFloat, StrictInt]] = None threshold: Optional[Union[StrictFloat, StrictInt]] = None operation: Optional[SelectionStrategyThresholdOperation] = None target: Optional[Dict[str, Any]] = None - __properties = ["type", "strength", "stopping_condition_minimum_distance", "threshold", "operation", "target"] + __properties = ["type", "stopping_condition_minimum_distance", "threshold", "operation", "target"] class Config: """Pydantic configuration""" @@ -80,7 +79,6 @@ def from_dict(cls, obj: dict) -> SelectionConfigEntryStrategy: _obj = SelectionConfigEntryStrategy.parse_obj({ "type": obj.get("type"), - "strength": obj.get("strength"), "stopping_condition_minimum_distance": obj.get("stopping_condition_minimum_distance"), "threshold": obj.get("threshold"), "operation": obj.get("operation"), diff --git a/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_strategy.py b/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_strategy.py index d5b39a3dc..a9e00e1ea 100644 --- a/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_strategy.py +++ b/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_strategy.py @@ -23,21 +23,23 @@ from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, confloat, conint from lightly.openapi_generated.swagger_client.models.selection_config_v3_entry_strategy_all_of_target_range import SelectionConfigV3EntryStrategyAllOfTargetRange from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation -from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType +from lightly.openapi_generated.swagger_client.models.selection_strategy_type_v3 import SelectionStrategyTypeV3 class SelectionConfigV3EntryStrategy(BaseModel): """ SelectionConfigV3EntryStrategy """ - type: SelectionStrategyType = Field(...) - strength: Optional[Union[confloat(le=1000000000, ge=-1000000000, strict=True), conint(le=1000000000, ge=-1000000000, strict=True)]] = Field(None, description="The relative strength of this strategy compared to other strategies. The default value is 1.0, which is set in the worker for backwards compatibility. The minimum and maximum values of +-10^9 are used to prevent numerical issues. ") + type: SelectionStrategyTypeV3 = Field(...) stopping_condition_minimum_distance: Optional[Union[StrictFloat, StrictInt]] = None threshold: Optional[Union[StrictFloat, StrictInt]] = None operation: Optional[SelectionStrategyThresholdOperation] = None target: Optional[Dict[str, Any]] = None + num_nearest_neighbors: Optional[Union[confloat(ge=2, strict=True), conint(ge=2, strict=True)]] = Field(None, alias="numNearestNeighbors", description="It is the number of nearest datapoints used to compute the typicality of each sample. ") + stopping_condition_minimum_typicality: Optional[Union[confloat(gt=0, strict=True), conint(gt=0, strict=True)]] = Field(None, alias="stoppingConditionMinimumTypicality", description="It is the minimal allowed typicality of the selected samples. When the typicality of the selected samples reaches this, the selection stops. It should be a number between 0 and 1. ") + strength: Optional[Union[confloat(le=1000000000, ge=-1000000000, strict=True), conint(le=1000000000, ge=-1000000000, strict=True)]] = Field(None, description="The relative strength of this strategy compared to other strategies. 
+    strength: Optional[Union[confloat(le=1000000000, ge=-1000000000, strict=True), conint(le=1000000000, ge=-1000000000, strict=True)]] = Field(None, description="The relative strength of this strategy compared to other strategies. The default value is 1.0, which is set in the worker for backwards compatibility. The minimum and maximum values of +-10^9 are used to prevent numerical issues. ")
     stopping_condition_max_sum: Optional[Union[confloat(ge=0.0, strict=True), conint(ge=0, strict=True)]] = Field(None, alias="stoppingConditionMaxSum", description="When the sum of inputs reaches this, the selection stops. Only compatible with the WEIGHTS strategy. Similar to the stopping_condition_minimum_distance for the DIVERSITY strategy. ")
     target_range: Optional[SelectionConfigV3EntryStrategyAllOfTargetRange] = Field(None, alias="targetRange")
-    __properties = ["type", "strength", "stopping_condition_minimum_distance", "threshold", "operation", "target", "stoppingConditionMaxSum", "targetRange"]
+    __properties = ["type", "stopping_condition_minimum_distance", "threshold", "operation", "target", "numNearestNeighbors", "stoppingConditionMinimumTypicality", "strength", "stoppingConditionMaxSum", "targetRange"]
     class Config:
         """Pydantic configuration"""
@@ -86,11 +88,13 @@ def from_dict(cls, obj: dict) -> SelectionConfigV3EntryStrategy:
         _obj = SelectionConfigV3EntryStrategy.parse_obj({
             "type": obj.get("type"),
-            "strength": obj.get("strength"),
             "stopping_condition_minimum_distance": obj.get("stopping_condition_minimum_distance"),
             "threshold": obj.get("threshold"),
             "operation": obj.get("operation"),
             "target": obj.get("target"),
+            "num_nearest_neighbors": obj.get("numNearestNeighbors"),
+            "stopping_condition_minimum_typicality": obj.get("stoppingConditionMinimumTypicality"),
+            "strength": obj.get("strength"),
             "stopping_condition_max_sum": obj.get("stoppingConditionMaxSum"),
             "target_range": SelectionConfigV3EntryStrategyAllOfTargetRange.from_dict(obj.get("targetRange")) if obj.get("targetRange") is not None else None
         })
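Note: a short sketch of the extended strategy model in use, with illustrative TYPICALITY values; it assumes the generated to_dict() serializes by alias, which is the usual behavior of these generated pydantic models:

    >>> strategy = SelectionConfigV3EntryStrategy(
    ...     type=SelectionStrategyTypeV3.TYPICALITY,
    ...     num_nearest_neighbors=20,
    ...     stopping_condition_minimum_typicality=0.3,
    ... )
    >>> strategy.to_dict()["numNearestNeighbors"]
    20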
") + strength: Optional[Union[confloat(le=1000000000, ge=-1000000000, strict=True), conint(le=1000000000, ge=-1000000000, strict=True)]] = Field(None, description="The relative strength of this strategy compared to other strategies. The default value is 1.0, which is set in the worker for backwards compatibility. The minimum and maximum values of +-10^9 are used to prevent numerical issues. ") stopping_condition_max_sum: Optional[Union[confloat(ge=0.0, strict=True), conint(ge=0, strict=True)]] = Field(None, alias="stoppingConditionMaxSum", description="When the sum of inputs reaches this, the selection stops. Only compatible with the WEIGHTS strategy. Similar to the stopping_condition_minimum_distance for the DIVERSITY strategy. ") target_range: Optional[SelectionConfigV3EntryStrategyAllOfTargetRange] = Field(None, alias="targetRange") - __properties = ["stoppingConditionMaxSum", "targetRange"] + __properties = ["type", "numNearestNeighbors", "stoppingConditionMinimumTypicality", "strength", "stoppingConditionMaxSum", "targetRange"] class Config: """Pydantic configuration""" @@ -77,6 +82,10 @@ def from_dict(cls, obj: dict) -> SelectionConfigV3EntryStrategyAllOf: raise ValueError("Error due to additional fields (not defined in SelectionConfigV3EntryStrategyAllOf) in the input: " + str(obj)) _obj = SelectionConfigV3EntryStrategyAllOf.parse_obj({ + "type": obj.get("type"), + "num_nearest_neighbors": obj.get("numNearestNeighbors"), + "stopping_condition_minimum_typicality": obj.get("stoppingConditionMinimumTypicality"), + "strength": obj.get("strength"), "stopping_condition_max_sum": obj.get("stoppingConditionMaxSum"), "target_range": SelectionConfigV3EntryStrategyAllOfTargetRange.from_dict(obj.get("targetRange")) if obj.get("targetRange") is not None else None }) diff --git a/lightly/openapi_generated/swagger_client/models/selection_strategy_type_v3.py b/lightly/openapi_generated/swagger_client/models/selection_strategy_type_v3.py new file mode 100644 index 000000000..4d1984560 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/selection_strategy_type_v3.py @@ -0,0 +1,46 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +import json +import pprint +import re # noqa: F401 +from enum import Enum +from aenum import no_arg # type: ignore + + + + + +class SelectionStrategyTypeV3(str, Enum): + """ + SelectionStrategyTypeV3 + """ + + """ + allowed enum values + """ + DIVERSITY = 'DIVERSITY' + WEIGHTS = 'WEIGHTS' + THRESHOLD = 'THRESHOLD' + BALANCE = 'BALANCE' + SIMILARITY = 'SIMILARITY' + TYPICALITY = 'TYPICALITY' + + @classmethod + def from_json(cls, json_str: str) -> 'SelectionStrategyTypeV3': + """Create an instance of SelectionStrategyTypeV3 from a JSON string""" + return SelectionStrategyTypeV3(json.loads(json_str)) + + diff --git a/tests/api_workflow/test_api_workflow_compute_worker.py b/tests/api_workflow/test_api_workflow_compute_worker.py index 09efdd8a2..b0eb80625 100644 --- a/tests/api_workflow/test_api_workflow_compute_worker.py +++ b/tests/api_workflow/test_api_workflow_compute_worker.py @@ -33,14 +33,14 @@ DockerWorkerConfigV3LightlyLoader, DockerWorkerState, DockerWorkerType, - SelectionConfig, - SelectionConfigEntry, - SelectionConfigEntryInput, - SelectionConfigEntryStrategy, + SelectionConfigV3, + SelectionConfigV3Entry, + SelectionConfigV3EntryInput, + SelectionConfigV3EntryStrategy, SelectionInputPredictionsName, SelectionInputType, SelectionStrategyThresholdOperation, - SelectionStrategyType, + SelectionStrategyTypeV3, TagData, ) from lightly.openapi_generated.swagger_client.rest import ApiException @@ -101,17 +101,17 @@ def test_create_compute_worker_config__selection_config_is_class(self) -> None: "batch_size": 64, }, }, - selection_config=SelectionConfig( + selection_config=SelectionConfigV3( n_samples=20, strategies=[ - SelectionConfigEntry( - input=SelectionConfigEntryInput( + SelectionConfigV3Entry( + input=SelectionConfigV3EntryInput( type=SelectionInputType.EMBEDDINGS, dataset_id=utils.generate_id(), tag_name="some-tag-name", ), - strategy=SelectionConfigEntryStrategy( - type=SelectionStrategyType.SIMILARITY, + strategy=SelectionConfigV3EntryStrategy( + type=SelectionStrategyTypeV3.SIMILARITY, ), ) ], @@ -203,44 +203,46 @@ def _check_if_openapi_generated_obj_is_valid(self, obj) -> Any: return obj_api def test_selection_config(self): - selection_config = SelectionConfig( + selection_config = SelectionConfigV3( n_samples=1, strategies=[ - SelectionConfigEntry( - input=SelectionConfigEntryInput(type=SelectionInputType.EMBEDDINGS), - strategy=SelectionConfigEntryStrategy( - type=SelectionStrategyType.DIVERSITY, + SelectionConfigV3Entry( + input=SelectionConfigV3EntryInput( + type=SelectionInputType.EMBEDDINGS + ), + strategy=SelectionConfigV3EntryStrategy( + type=SelectionStrategyTypeV3.DIVERSITY, stopping_condition_minimum_distance=-1, ), ), - SelectionConfigEntry( - input=SelectionConfigEntryInput( + SelectionConfigV3Entry( + input=SelectionConfigV3EntryInput( type=SelectionInputType.SCORES, task="my-classification-task", score="uncertainty_margin", ), - strategy=SelectionConfigEntryStrategy( - type=SelectionStrategyType.WEIGHTS + strategy=SelectionConfigV3EntryStrategy( + type=SelectionStrategyTypeV3.WEIGHTS ), ), - SelectionConfigEntry( - input=SelectionConfigEntryInput( + SelectionConfigV3Entry( + input=SelectionConfigV3EntryInput( type=SelectionInputType.METADATA, key="lightly.sharpness" ), - strategy=SelectionConfigEntryStrategy( - type=SelectionStrategyType.THRESHOLD, + strategy=SelectionConfigV3EntryStrategy( + type=SelectionStrategyTypeV3.THRESHOLD, threshold=20, operation=SelectionStrategyThresholdOperation.BIGGER_EQUAL, ), ), - SelectionConfigEntry( - 
diff --git a/tests/api_workflow/test_api_workflow_compute_worker.py b/tests/api_workflow/test_api_workflow_compute_worker.py
index 09efdd8a2..b0eb80625 100644
--- a/tests/api_workflow/test_api_workflow_compute_worker.py
+++ b/tests/api_workflow/test_api_workflow_compute_worker.py
@@ -33,14 +33,14 @@
     DockerWorkerConfigV3LightlyLoader,
     DockerWorkerState,
     DockerWorkerType,
-    SelectionConfig,
-    SelectionConfigEntry,
-    SelectionConfigEntryInput,
-    SelectionConfigEntryStrategy,
+    SelectionConfigV3,
+    SelectionConfigV3Entry,
+    SelectionConfigV3EntryInput,
+    SelectionConfigV3EntryStrategy,
     SelectionInputPredictionsName,
     SelectionInputType,
     SelectionStrategyThresholdOperation,
-    SelectionStrategyType,
+    SelectionStrategyTypeV3,
     TagData,
 )
 from lightly.openapi_generated.swagger_client.rest import ApiException
@@ -101,17 +101,17 @@ def test_create_compute_worker_config__selection_config_is_class(self) -> None:
                 "batch_size": 64,
             },
         },
-        selection_config=SelectionConfig(
+        selection_config=SelectionConfigV3(
            n_samples=20,
            strategies=[
-                SelectionConfigEntry(
-                    input=SelectionConfigEntryInput(
+                SelectionConfigV3Entry(
+                    input=SelectionConfigV3EntryInput(
                         type=SelectionInputType.EMBEDDINGS,
                         dataset_id=utils.generate_id(),
                         tag_name="some-tag-name",
                     ),
-                    strategy=SelectionConfigEntryStrategy(
-                        type=SelectionStrategyType.SIMILARITY,
+                    strategy=SelectionConfigV3EntryStrategy(
+                        type=SelectionStrategyTypeV3.SIMILARITY,
                     ),
                 )
             ],
@@ -203,44 +203,46 @@ def _check_if_openapi_generated_obj_is_valid(self, obj) -> Any:
         return obj_api
     def test_selection_config(self):
-        selection_config = SelectionConfig(
+        selection_config = SelectionConfigV3(
             n_samples=1,
             strategies=[
-                SelectionConfigEntry(
-                    input=SelectionConfigEntryInput(type=SelectionInputType.EMBEDDINGS),
-                    strategy=SelectionConfigEntryStrategy(
-                        type=SelectionStrategyType.DIVERSITY,
+                SelectionConfigV3Entry(
+                    input=SelectionConfigV3EntryInput(
+                        type=SelectionInputType.EMBEDDINGS
+                    ),
+                    strategy=SelectionConfigV3EntryStrategy(
+                        type=SelectionStrategyTypeV3.DIVERSITY,
                         stopping_condition_minimum_distance=-1,
                     ),
                 ),
-                SelectionConfigEntry(
-                    input=SelectionConfigEntryInput(
+                SelectionConfigV3Entry(
+                    input=SelectionConfigV3EntryInput(
                         type=SelectionInputType.SCORES,
                         task="my-classification-task",
                         score="uncertainty_margin",
                     ),
-                    strategy=SelectionConfigEntryStrategy(
-                        type=SelectionStrategyType.WEIGHTS
+                    strategy=SelectionConfigV3EntryStrategy(
+                        type=SelectionStrategyTypeV3.WEIGHTS
                     ),
                 ),
-                SelectionConfigEntry(
-                    input=SelectionConfigEntryInput(
+                SelectionConfigV3Entry(
+                    input=SelectionConfigV3EntryInput(
                         type=SelectionInputType.METADATA, key="lightly.sharpness"
                     ),
-                    strategy=SelectionConfigEntryStrategy(
-                        type=SelectionStrategyType.THRESHOLD,
+                    strategy=SelectionConfigV3EntryStrategy(
+                        type=SelectionStrategyTypeV3.THRESHOLD,
                         threshold=20,
                         operation=SelectionStrategyThresholdOperation.BIGGER_EQUAL,
                     ),
                 ),
-                SelectionConfigEntry(
-                    input=SelectionConfigEntryInput(
+                SelectionConfigV3Entry(
+                    input=SelectionConfigV3EntryInput(
                         type=SelectionInputType.PREDICTIONS,
                         task="my_object_detection_task",
                         name=SelectionInputPredictionsName.CLASS_DISTRIBUTION,
                     ),
-                    strategy=SelectionConfigEntryStrategy(
-                        type=SelectionStrategyType.BALANCE,
+                    strategy=SelectionConfigV3EntryStrategy(
+                        type=SelectionStrategyTypeV3.BALANCE,
                         target={"Ambulance": 0.2, "Bus": 0.4},
                     ),
                 ),