Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update specs with ALscores v2 and shutdownWhenJobFinished #1421

Merged
merged 1 commit into from
Nov 7, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions lightly/openapi_generated/swagger_client/__init__.py
Original file line number Diff line number Diff line change
@@ -50,6 +50,8 @@
# import models into sdk package
from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest
from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData
from lightly.openapi_generated.swagger_client.models.active_learning_score_types_v2_data import ActiveLearningScoreTypesV2Data
from lightly.openapi_generated.swagger_client.models.active_learning_score_v2_data import ActiveLearningScoreV2Data
from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode
from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
275 changes: 275 additions & 0 deletions lightly/openapi_generated/swagger_client/__init__.py-e

Large diffs are not rendered by default.

22 changes: 22 additions & 0 deletions lightly/openapi_generated/swagger_client/api/__init__.py-e
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# flake8: noqa

# import apis into api package
from lightly.openapi_generated.swagger_client.api.collaboration_api import CollaborationApi
from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi
from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi
from lightly.openapi_generated.swagger_client.api.docker_api import DockerApi
from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
from lightly.openapi_generated.swagger_client.api.embeddings2d_api import Embeddings2dApi
from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
from lightly.openapi_generated.swagger_client.api.meta_data_configurations_api import MetaDataConfigurationsApi
from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi
from lightly.openapi_generated.swagger_client.api.profiles_api import ProfilesApi
from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi
from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi
from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi
from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi
from lightly.openapi_generated.swagger_client.api.teams_api import TeamsApi
from lightly.openapi_generated.swagger_client.api.versioning_api import VersioningApi

507 changes: 500 additions & 7 deletions lightly/openapi_generated/swagger_client/api/scores_api.py

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions lightly/openapi_generated/swagger_client/models/__init__.py
Original file line number Diff line number Diff line change
@@ -17,6 +17,8 @@
# import models into model package
from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest
from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData
from lightly.openapi_generated.swagger_client.models.active_learning_score_types_v2_data import ActiveLearningScoreTypesV2Data
from lightly.openapi_generated.swagger_client.models.active_learning_score_v2_data import ActiveLearningScoreV2Data
from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode
from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
241 changes: 241 additions & 0 deletions lightly/openapi_generated/swagger_client/models/__init__.py-e

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
# coding: utf-8

"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""


from __future__ import annotations
import pprint
import re # noqa: F401
import json



from pydantic import Extra, BaseModel, Field, conint, constr, validator

class ActiveLearningScoreTypesV2Data(BaseModel):
    """API model for one active-learning score *type* entry of a dataset.

    Mirrors the ``ActiveLearningScoreTypesV2Data`` schema of the Lightly
    OpenAPI spec: identifiers, the owning dataset, the prediction run
    timestamp, the task and score-type names, and the creation time.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    dataset_id: constr(strict=True) = Field(..., alias="datasetId", description="MongoDB ObjectId")
    prediction_uuid_timestamp: conint(strict=True, ge=0) = Field(..., alias="predictionUUIDTimestamp", description="unix timestamp in milliseconds")
    task_name: constr(strict=True, min_length=1) = Field(..., alias="taskName", description="A name which is safe to have as a file/folder name in a file system")
    score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score")
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    # Wire-format (camelCase) property names, in schema order; consulted by
    # from_dict() to reject unknown keys.
    __properties = ["id", "datasetId", "predictionUUIDTimestamp", "taskName", "scoreType", "createdAt"]

    @validator('id')
    def _validate_id_pattern(cls, v):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return v

    @validator('dataset_id')
    def _validate_dataset_id_pattern(cls, v):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return v

    @validator('task_name')
    def _validate_task_name_pattern(cls, v):
        """Require a filesystem-safe task name."""
        if re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/")
        return v

    @validator('score_type')
    def _validate_score_type_pattern(cls, v):
        """Require a score-type string limited to a safe character set."""
        if re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/")
        return v

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string of the model's field values."""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Return the model serialized as a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ActiveLearningScoreTypesV2Data:
        """Build an instance from a JSON string (delegates to from_dict)."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Return the model as a plain dict, omitting None-valued fields."""
        return self.dict(by_alias=by_alias,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> ActiveLearningScoreTypesV2Data:
        """Build an instance from a dict of wire-format (camelCase) keys.

        Returns None for None input; non-dict input is handed straight to
        pydantic. Raises ValueError on keys not declared in the schema.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return ActiveLearningScoreTypesV2Data.parse_obj(obj)

        # Reject payloads carrying fields the schema does not define.
        if any(key not in cls.__properties for key in obj):
            raise ValueError("Error due to additional fields (not defined in ActiveLearningScoreTypesV2Data) in the input: " + str(obj))

        payload = {
            "id": obj.get("id"),
            "dataset_id": obj.get("datasetId"),
            "prediction_uuid_timestamp": obj.get("predictionUUIDTimestamp"),
            "task_name": obj.get("taskName"),
            "score_type": obj.get("scoreType"),
            "created_at": obj.get("createdAt")
        }
        return ActiveLearningScoreTypesV2Data.parse_obj(payload)

Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
# coding: utf-8

"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""


from __future__ import annotations
import pprint
import re # noqa: F401
import json


from typing import List, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conint, conlist, constr, validator

class ActiveLearningScoreV2Data(BaseModel):
    """API model for one active-learning score record (v2) of a dataset.

    Mirrors the ``ActiveLearningScoreV2Data`` schema of the Lightly OpenAPI
    spec: identifiers, the owning dataset, the prediction run timestamp,
    task/score-type names, the per-sample score array, and creation time.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    dataset_id: constr(strict=True) = Field(..., alias="datasetId", description="MongoDB ObjectId")
    prediction_uuid_timestamp: conint(strict=True, ge=0) = Field(..., alias="predictionUUIDTimestamp", description="unix timestamp in milliseconds")
    task_name: constr(strict=True, min_length=1) = Field(..., alias="taskName", description="A name which is safe to have as a file/folder name in a file system")
    score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score")
    scores: conlist(Union[StrictFloat, StrictInt], min_items=1) = Field(..., description="Array of active learning scores")
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    # Wire-format (camelCase) property names, in schema order; consulted by
    # from_dict() to reject unknown keys.
    __properties = ["id", "datasetId", "predictionUUIDTimestamp", "taskName", "scoreType", "scores", "createdAt"]

    @validator('id')
    def _validate_id_pattern(cls, v):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return v

    @validator('dataset_id')
    def _validate_dataset_id_pattern(cls, v):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return v

    @validator('task_name')
    def _validate_task_name_pattern(cls, v):
        """Require a filesystem-safe task name."""
        if re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/")
        return v

    @validator('score_type')
    def _validate_score_type_pattern(cls, v):
        """Require a score-type string limited to a safe character set."""
        if re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", v) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/")
        return v

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string of the model's field values."""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Return the model serialized as a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ActiveLearningScoreV2Data:
        """Build an instance from a JSON string (delegates to from_dict)."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Return the model as a plain dict, omitting None-valued fields."""
        return self.dict(by_alias=by_alias,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> ActiveLearningScoreV2Data:
        """Build an instance from a dict of wire-format (camelCase) keys.

        Returns None for None input; non-dict input is handed straight to
        pydantic. Raises ValueError on keys not declared in the schema.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return ActiveLearningScoreV2Data.parse_obj(obj)

        # Reject payloads carrying fields the schema does not define.
        if any(key not in cls.__properties for key in obj):
            raise ValueError("Error due to additional fields (not defined in ActiveLearningScoreV2Data) in the input: " + str(obj))

        payload = {
            "id": obj.get("id"),
            "dataset_id": obj.get("datasetId"),
            "prediction_uuid_timestamp": obj.get("predictionUUIDTimestamp"),
            "task_name": obj.get("taskName"),
            "score_type": obj.get("scoreType"),
            "scores": obj.get("scores"),
            "created_at": obj.get("createdAt")
        }
        return ActiveLearningScoreV2Data.parse_obj(payload)

Original file line number Diff line number Diff line change
@@ -45,7 +45,8 @@ class DockerWorkerConfigV3Docker(BaseModel):
relevant_filenames_file: Optional[StrictStr] = Field(None, alias="relevantFilenamesFile")
selected_sequence_length: Optional[conint(strict=True, ge=1)] = Field(None, alias="selectedSequenceLength")
upload_report: Optional[StrictBool] = Field(None, alias="uploadReport")
__properties = ["checkpoint", "checkpointRunId", "corruptnessCheck", "datasource", "embeddings", "enableTraining", "training", "normalizeEmbeddings", "numProcesses", "numThreads", "outputImageFormat", "pretagging", "pretaggingUpload", "relevantFilenamesFile", "selectedSequenceLength", "uploadReport"]
shutdown_when_job_finished: Optional[StrictBool] = Field(None, alias="shutdownWhenJobFinished")
__properties = ["checkpoint", "checkpointRunId", "corruptnessCheck", "datasource", "embeddings", "enableTraining", "training", "normalizeEmbeddings", "numProcesses", "numThreads", "outputImageFormat", "pretagging", "pretaggingUpload", "relevantFilenamesFile", "selectedSequenceLength", "uploadReport", "shutdownWhenJobFinished"]

@validator('checkpoint_run_id')
def checkpoint_run_id_validate_regular_expression(cls, value):
@@ -124,7 +125,8 @@ def from_dict(cls, obj: dict) -> DockerWorkerConfigV3Docker:
"pretagging_upload": obj.get("pretaggingUpload"),
"relevant_filenames_file": obj.get("relevantFilenamesFile"),
"selected_sequence_length": obj.get("selectedSequenceLength"),
"upload_report": obj.get("uploadReport")
"upload_report": obj.get("uploadReport"),
"shutdown_when_job_finished": obj.get("shutdownWhenJobFinished")
})
return _obj

Original file line number Diff line number Diff line change
@@ -28,8 +28,9 @@ class PredictionSingletonKeypointDetection(PredictionSingletonBase):
PredictionSingletonKeypointDetection
"""
keypoints: conlist(Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)], min_items=3) = Field(..., description="[x1, y2, s1, ..., xk, yk, sk] as outlined by https://docs.lightly.ai/docs/prediction-format#keypoint-detection ")
bbox: Optional[conlist(Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)], max_items=4, min_items=4)] = Field(None, description="The bbox of where a prediction task yielded a finding. [x, y, width, height]")
probabilities: Optional[conlist(Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)])] = Field(None, description="The probabilities of it being a certain category other than the one which was selected. The sum of all probabilities should equal 1.")
__properties = ["type", "taskName", "cropDatasetId", "cropSampleId", "categoryId", "score", "keypoints", "probabilities"]
__properties = ["type", "taskName", "cropDatasetId", "cropSampleId", "categoryId", "score", "keypoints", "bbox", "probabilities"]

class Config:
"""Pydantic configuration"""
@@ -81,6 +82,7 @@ def from_dict(cls, obj: dict) -> PredictionSingletonKeypointDetection:
"category_id": obj.get("categoryId"),
"score": obj.get("score"),
"keypoints": obj.get("keypoints"),
"bbox": obj.get("bbox"),
"probabilities": obj.get("probabilities")
})
return _obj
Original file line number Diff line number Diff line change
@@ -27,8 +27,9 @@ class PredictionSingletonKeypointDetectionAllOf(BaseModel):
PredictionSingletonKeypointDetectionAllOf
"""
keypoints: conlist(Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)], min_items=3) = Field(..., description="[x1, y2, s1, ..., xk, yk, sk] as outlined by https://docs.lightly.ai/docs/prediction-format#keypoint-detection ")
bbox: Optional[conlist(Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)], max_items=4, min_items=4)] = Field(None, description="The bbox of where a prediction task yielded a finding. [x, y, width, height]")
probabilities: Optional[conlist(Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)])] = Field(None, description="The probabilities of it being a certain category other than the one which was selected. The sum of all probabilities should equal 1.")
__properties = ["keypoints", "probabilities"]
__properties = ["keypoints", "bbox", "probabilities"]

class Config:
"""Pydantic configuration"""
@@ -74,6 +75,7 @@ def from_dict(cls, obj: dict) -> PredictionSingletonKeypointDetectionAllOf:

_obj = PredictionSingletonKeypointDetectionAllOf.parse_obj({
"keypoints": obj.get("keypoints"),
"bbox": obj.get("bbox"),
"probabilities": obj.get("probabilities")
})
return _obj