From a2b537774e79a9576d870b0b85cc3fbdc55bcfdc Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Tue, 13 May 2025 12:12:14 -0400 Subject: [PATCH 1/7] Backup & restore --- codegen/apis | 2 +- codegen/python-oas-templates | 2 +- pinecone/__init__.py | 4 + .../db_control/api/manage_indexes_api.py | 19 +- .../model/{dedicated_spec.py => byoc_spec.py} | 8 +- .../db_control/model/create_backup_request.py | 8 +- .../create_index_from_backup_response.py | 272 +++++++++++ .../db_control/model/deletion_protection.py | 8 +- .../db_control/model/index_model_spec.py | 12 +- .../openapi/db_control/model/index_spec.py | 12 +- .../openapi/db_control/models/__init__.py | 5 +- .../db_data/api/bulk_operations_api.py | 16 +- .../db_data/api/namespace_operations_api.py | 443 ++++++++++++++++++ .../db_data/api/vector_operations_api.py | 32 +- .../core/openapi/db_data/apis/__init__.py | 1 + .../openapi/db_data/model/delete_request.py | 4 +- .../model/describe_index_stats_request.py | 4 +- .../db_data/model/list_namespaces_response.py | 274 +++++++++++ .../db_data/model/namespace_description.py | 264 +++++++++++ .../openapi/db_data/model/query_request.py | 4 +- .../model/search_records_request_query.py | 4 +- .../model/search_records_request_rerank.py | 12 +- .../db_data/model/start_import_request.py | 4 +- .../core/openapi/db_data/models/__init__.py | 2 + .../openapi/inference/api/inference_api.py | 8 +- .../openapi/inference/model/embed_request.py | 8 +- .../openapi/inference/model/model_info.py | 54 ++- .../openapi/inference/model/rerank_request.py | 12 +- pinecone/db_control/db_control.py | 29 +- pinecone/db_control/db_control_asyncio.py | 27 +- pinecone/db_control/models/__init__.py | 9 + pinecone/db_control/models/backup_list.py | 31 ++ pinecone/db_control/models/backup_model.py | 18 + pinecone/db_control/request_factory.py | 19 +- .../db_control/resources/asyncio/backup.py | 92 ++++ .../db_control/resources/asyncio/index.py | 16 + .../resources/asyncio/restore_job.py | 55 +++ 
pinecone/db_control/resources/sync/backup.py | 86 ++++ pinecone/db_control/resources/sync/index.py | 37 +- .../db_control/resources/sync/restore_job.py | 55 +++ pinecone/legacy_pinecone_interface.py | 105 +++++ pinecone/openapi_support/api_version.py | 2 +- pinecone/pinecone.py | 62 ++- pinecone/pinecone_asyncio.py | 66 ++- pinecone/pinecone_interface_asyncio.py | 105 +++++ pinecone/utils/__init__.py | 2 + pinecone/utils/require_kwargs.py | 16 + 47 files changed, 2208 insertions(+), 122 deletions(-) rename pinecone/core/openapi/db_control/model/{dedicated_spec.py => byoc_spec.py} (98%) create mode 100644 pinecone/core/openapi/db_control/model/create_index_from_backup_response.py create mode 100644 pinecone/core/openapi/db_data/api/namespace_operations_api.py create mode 100644 pinecone/core/openapi/db_data/model/list_namespaces_response.py create mode 100644 pinecone/core/openapi/db_data/model/namespace_description.py create mode 100644 pinecone/db_control/models/backup_list.py create mode 100644 pinecone/db_control/models/backup_model.py create mode 100644 pinecone/db_control/resources/asyncio/backup.py create mode 100644 pinecone/db_control/resources/asyncio/restore_job.py create mode 100644 pinecone/db_control/resources/sync/backup.py create mode 100644 pinecone/db_control/resources/sync/restore_job.py create mode 100644 pinecone/utils/require_kwargs.py diff --git a/codegen/apis b/codegen/apis index ba143abc7..4b1c83b3b 160000 --- a/codegen/apis +++ b/codegen/apis @@ -1 +1 @@ -Subproject commit ba143abc7449abfcf0b6635f1aabff2400dac762 +Subproject commit 4b1c83b3b6669e6596151a575c284ee2cf4977a7 diff --git a/codegen/python-oas-templates b/codegen/python-oas-templates index 0f6ff6858..c7c75f57c 160000 --- a/codegen/python-oas-templates +++ b/codegen/python-oas-templates @@ -1 +1 @@ -Subproject commit 0f6ff68585355dd11e959e05859928d878d7854b +Subproject commit c7c75f57c6dfd0228a7bead444ea1004c57e0de3 diff --git a/pinecone/__init__.py b/pinecone/__init__.py index 
b2c0f5c82..d6d73c954 100644 --- a/pinecone/__init__.py +++ b/pinecone/__init__.py @@ -76,6 +76,10 @@ "PodSpec": ("pinecone.db_control.models", "PodSpec"), "PodSpecDefinition": ("pinecone.db_control.models", "PodSpecDefinition"), "PodType": ("pinecone.db_control.enums", "PodType"), + "RestoreJobModel": ("pinecone.db_control.models", "RestoreJobModel"), + "RestoreJobList": ("pinecone.db_control.models", "RestoreJobList"), + "BackupModel": ("pinecone.db_control.models", "BackupModel"), + "BackupList": ("pinecone.db_control.models", "BackupList"), } _config_lazy_imports = { diff --git a/pinecone/core/openapi/db_control/api/manage_indexes_api.py b/pinecone/core/openapi/db_control/api/manage_indexes_api.py index 3796fd264..50d6f5931 100644 --- a/pinecone/core/openapi/db_control/api/manage_indexes_api.py +++ b/pinecone/core/openapi/db_control/api/manage_indexes_api.py @@ -36,6 +36,9 @@ from pinecone.core.openapi.db_control.model.create_index_from_backup_request import ( CreateIndexFromBackupRequest, ) +from pinecone.core.openapi.db_control.model.create_index_from_backup_response import ( + CreateIndexFromBackupResponse, +) from pinecone.core.openapi.db_control.model.create_index_request import CreateIndexRequest from pinecone.core.openapi.db_control.model.error_response import ErrorResponse from pinecone.core.openapi.db_control.model.index_list import IndexList @@ -281,7 +284,7 @@ def __create_collection( def __create_index(self, create_index_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Create an index # noqa: E501 - Create a Pinecone index. This is where you specify the measure of similarity, the dimension of vectors to be stored in the index, which cloud provider you would like to deploy with, and more. For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/indexes/create-an-index#create-a-serverless-index). # noqa: E501 + Create a Pinecone index. 
This is where you specify the measure of similarity, the dimension of vectors to be stored in the index, which cloud provider you would like to deploy with, and more. For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/index-data/create-an-index). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -352,7 +355,7 @@ def __create_index_for_model( ): """Create an index with integrated embedding # noqa: E501 - Create an index with integrated embedding. With this type of index, you provide source text, and Pinecone uses a [hosted embedding model](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) to convert the text automatically during [upsert](https://docs.pinecone.io/reference/api/2025-01/data-plane/upsert_records) and [search](https://docs.pinecone.io/reference/api/2025-01/data-plane/search_records). For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). # noqa: E501 + Create an index with integrated embedding. With this type of index, you provide source text, and Pinecone uses a [hosted embedding model](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) to convert the text automatically during [upsert](https://docs.pinecone.io/reference/api/2025-01/data-plane/upsert_records) and [search](https://docs.pinecone.io/reference/api/2025-01/data-plane/search_records). For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/index-data/create-an-index#integrated-embedding). # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True @@ -453,7 +456,7 @@ def __create_index_from_backup( async_req (bool): execute request asynchronously Returns: - None + CreateIndexFromBackupResponse If the method is called asynchronously, returns the request thread. """ @@ -464,7 +467,7 @@ def __create_index_from_backup( self.create_index_from_backup = _Endpoint( settings={ - "response_type": None, + "response_type": (CreateIndexFromBackupResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/backups/{backup_id}/create-index", "operation_id": "create_index_from_backup", @@ -1519,7 +1522,7 @@ async def __create_collection(self, create_collection_request, **kwargs): async def __create_index(self, create_index_request, **kwargs): """Create an index # noqa: E501 - Create a Pinecone index. This is where you specify the measure of similarity, the dimension of vectors to be stored in the index, which cloud provider you would like to deploy with, and more. For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/indexes/create-an-index#create-a-serverless-index). # noqa: E501 + Create a Pinecone index. This is where you specify the measure of similarity, the dimension of vectors to be stored in the index, which cloud provider you would like to deploy with, and more. For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/index-data/create-an-index). # noqa: E501 Args: @@ -1581,7 +1584,7 @@ async def __create_index(self, create_index_request, **kwargs): async def __create_index_for_model(self, create_index_for_model_request, **kwargs): """Create an index with integrated embedding # noqa: E501 - Create an index with integrated embedding. 
With this type of index, you provide source text, and Pinecone uses a [hosted embedding model](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) to convert the text automatically during [upsert](https://docs.pinecone.io/reference/api/2025-01/data-plane/upsert_records) and [search](https://docs.pinecone.io/reference/api/2025-01/data-plane/search_records). For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). # noqa: E501 + Create an index with integrated embedding. With this type of index, you provide source text, and Pinecone uses a [hosted embedding model](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) to convert the text automatically during [upsert](https://docs.pinecone.io/reference/api/2025-01/data-plane/upsert_records) and [search](https://docs.pinecone.io/reference/api/2025-01/data-plane/search_records). For guidance and examples, see [Create an index](https://docs.pinecone.io/guides/index-data/create-an-index#integrated-embedding). # noqa: E501 Args: @@ -1670,7 +1673,7 @@ async def __create_index_from_backup( Default is True. 
Returns: - None + CreateIndexFromBackupResponse """ self._process_openapi_kwargs(kwargs) kwargs["backup_id"] = backup_id @@ -1679,7 +1682,7 @@ async def __create_index_from_backup( self.create_index_from_backup = _AsyncioEndpoint( settings={ - "response_type": None, + "response_type": (CreateIndexFromBackupResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/backups/{backup_id}/create-index", "operation_id": "create_index_from_backup", diff --git a/pinecone/core/openapi/db_control/model/dedicated_spec.py b/pinecone/core/openapi/db_control/model/byoc_spec.py similarity index 98% rename from pinecone/core/openapi/db_control/model/dedicated_spec.py rename to pinecone/core/openapi/db_control/model/byoc_spec.py index cb2c5d207..9b693e78c 100644 --- a/pinecone/core/openapi/db_control/model/dedicated_spec.py +++ b/pinecone/core/openapi/db_control/model/byoc_spec.py @@ -30,10 +30,10 @@ from typing import Dict, Literal, Tuple, Set, Any, Type, TypeVar from pinecone.openapi_support import PropertyValidationTypedDict, cached_class_property -T = TypeVar("T", bound="DedicatedSpec") +T = TypeVar("T", bound="ByocSpec") -class DedicatedSpec(ModelNormal): +class ByocSpec(ModelNormal): """NOTE: This class is @generated using OpenAPI. Do not edit the class manually. @@ -102,7 +102,7 @@ def discriminator(cls): @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls: Type[T], environment, *args, **kwargs) -> T: # noqa: E501 - """DedicatedSpec - a model defined in OpenAPI + """ByocSpec - a model defined in OpenAPI Args: environment (str): The environment where the index is hosted. @@ -189,7 +189,7 @@ def _from_openapi_data(cls: Type[T], environment, *args, **kwargs) -> T: # noqa @convert_js_args_to_python_args def __init__(self, environment, *args, **kwargs) -> None: # noqa: E501 - """DedicatedSpec - a model defined in OpenAPI + """ByocSpec - a model defined in OpenAPI Args: environment (str): The environment where the index is hosted. 
diff --git a/pinecone/core/openapi/db_control/model/create_backup_request.py b/pinecone/core/openapi/db_control/model/create_backup_request.py index 106e9ab88..bb79710d8 100644 --- a/pinecone/core/openapi/db_control/model/create_backup_request.py +++ b/pinecone/core/openapi/db_control/model/create_backup_request.py @@ -61,9 +61,7 @@ class CreateBackupRequest(ModelNormal): allowed_values: Dict[Tuple[str, ...], Dict[str, Any]] = {} - validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = { - ("name",): {"max_length": 45, "min_length": 1} - } + validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = {} @cached_class_property def additional_properties_type(cls): @@ -139,7 +137,7 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - name (str): The name of the index. Resource name must be 1-45 characters long, start and end with an alphanumeric character, and consist only of lower case alphanumeric characters or '-'. [optional] # noqa: E501 + name (str): The name of the backup. [optional] # noqa: E501 description (str): A description of the backup. [optional] # noqa: E501 """ @@ -224,7 +222,7 @@ def __init__(self, *args, **kwargs) -> None: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - name (str): The name of the index. Resource name must be 1-45 characters long, start and end with an alphanumeric character, and consist only of lower case alphanumeric characters or '-'. [optional] # noqa: E501 + name (str): The name of the backup. [optional] # noqa: E501 description (str): A description of the backup. 
[optional] # noqa: E501 """ diff --git a/pinecone/core/openapi/db_control/model/create_index_from_backup_response.py b/pinecone/core/openapi/db_control/model/create_index_from_backup_response.py new file mode 100644 index 000000000..ee6e7f369 --- /dev/null +++ b/pinecone/core/openapi/db_control/model/create_index_from_backup_response.py @@ -0,0 +1,272 @@ +""" +Pinecone Control Plane API + +Pinecone is a vector database that makes it easy to search and retrieve billions of high-dimensional vectors. # noqa: E501 + +This file is @generated using OpenAPI. + +The version of the OpenAPI document: 2025-04 +Contact: support@pinecone.io +""" + +from pinecone.openapi_support.model_utils import ( # noqa: F401 + PineconeApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + OpenApiModel, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, +) +from pinecone.openapi_support.exceptions import PineconeApiAttributeError + + +from typing import Dict, Literal, Tuple, Set, Any, Type, TypeVar +from pinecone.openapi_support import PropertyValidationTypedDict, cached_class_property + +T = TypeVar("T", bound="CreateIndexFromBackupResponse") + + +class CreateIndexFromBackupResponse(ModelNormal): + """NOTE: This class is @generated using OpenAPI. + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). 
The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + _data_store: Dict[str, Any] + _check_type: bool + + allowed_values: Dict[Tuple[str, ...], Dict[str, Any]] = {} + + validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = {} + + @cached_class_property + def additional_properties_type(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return (bool, dict, float, int, list, str, none_type) # noqa: E501 + + _nullable = False + + @cached_class_property + def openapi_types(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "restore_job_id": (str,), # noqa: E501 + "index_id": (str,), # noqa: E501 + } + + @cached_class_property + def discriminator(cls): + return None + + attribute_map: Dict[str, str] = { + "restore_job_id": "restore_job_id", # noqa: E501 + "index_id": "index_id", # noqa: E501 + } + + read_only_vars: Set[str] = set([]) + + _composed_schemas: Dict[Literal["allOf", "oneOf", "anyOf"], Any] = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls: Type[T], restore_job_id, index_id, *args, **kwargs) -> T: # noqa: E501 + """CreateIndexFromBackupResponse - a model defined in OpenAPI + + Args: + restore_job_id (str): The ID of the restore job that was created. + index_id (str): The ID of the index that was created from the backup. 
+ + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.restore_job_id = restore_job_id + self.index_id = index_id + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, restore_job_id, index_id, *args, **kwargs) -> None: # noqa: E501 + """CreateIndexFromBackupResponse - a model defined in OpenAPI + + Args: + restore_job_id (str): The ID of the restore job that was created. + index_id (str): The ID of the index that was created from the backup. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. 
+ If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.restore_job_id = restore_job_id + self.index_id = index_id + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise PineconeApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + f"class with read only attributes." + ) diff --git a/pinecone/core/openapi/db_control/model/deletion_protection.py b/pinecone/core/openapi/db_control/model/deletion_protection.py index 776826268..3194d70e7 100644 --- a/pinecone/core/openapi/db_control/model/deletion_protection.py +++ b/pinecone/core/openapi/db_control/model/deletion_protection.py @@ -111,10 +111,10 @@ def __init__(self, *args, **kwargs) -> None: Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): Whether [deletion protection](http://docs.pinecone.io/guides/indexes/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. . if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 + args[0] (str): Whether [deletion protection](http://docs.pinecone.io/guides/manage-data/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. . if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 Keyword Args: - value (str): Whether [deletion protection](http://docs.pinecone.io/guides/indexes/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. . if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 + value (str): Whether [deletion protection](http://docs.pinecone.io/guides/manage-data/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. . if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
@@ -195,10 +195,10 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): Whether [deletion protection](http://docs.pinecone.io/guides/indexes/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 + args[0] (str): Whether [deletion protection](http://docs.pinecone.io/guides/manage-data/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 Keyword Args: - value (str): Whether [deletion protection](http://docs.pinecone.io/guides/indexes/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 + value (str): Whether [deletion protection](http://docs.pinecone.io/guides/manage-data/manage-indexes#configure-deletion-protection) is enabled/disabled for the index. if omitted defaults to "disabled", must be one of ["disabled", "enabled", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
diff --git a/pinecone/core/openapi/db_control/model/index_model_spec.py b/pinecone/core/openapi/db_control/model/index_model_spec.py index 6bfa0a752..574deea17 100644 --- a/pinecone/core/openapi/db_control/model/index_model_spec.py +++ b/pinecone/core/openapi/db_control/model/index_model_spec.py @@ -28,11 +28,11 @@ def lazy_import(): - from pinecone.core.openapi.db_control.model.dedicated_spec import DedicatedSpec + from pinecone.core.openapi.db_control.model.byoc_spec import ByocSpec from pinecone.core.openapi.db_control.model.pod_spec import PodSpec from pinecone.core.openapi.db_control.model.serverless_spec import ServerlessSpec - globals()["DedicatedSpec"] = DedicatedSpec + globals()["ByocSpec"] = ByocSpec globals()["PodSpec"] = PodSpec globals()["ServerlessSpec"] = ServerlessSpec @@ -96,7 +96,7 @@ def openapi_types(cls): """ lazy_import() return { - "dedicated": (DedicatedSpec,), # noqa: E501 + "byoc": (ByocSpec,), # noqa: E501 "pod": (PodSpec,), # noqa: E501 "serverless": (ServerlessSpec,), # noqa: E501 } @@ -106,7 +106,7 @@ def discriminator(cls): return None attribute_map: Dict[str, str] = { - "dedicated": "dedicated", # noqa: E501 + "byoc": "byoc", # noqa: E501 "pod": "pod", # noqa: E501 "serverless": "serverless", # noqa: E501 } @@ -151,7 +151,7 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - dedicated (DedicatedSpec): [optional] # noqa: E501 + byoc (ByocSpec): [optional] # noqa: E501 pod (PodSpec): [optional] # noqa: E501 serverless (ServerlessSpec): [optional] # noqa: E501 """ @@ -237,7 +237,7 @@ def __init__(self, *args, **kwargs) -> None: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - dedicated (DedicatedSpec): [optional] # noqa: E501 + byoc (ByocSpec): [optional] # noqa: E501 pod (PodSpec): [optional] 
# noqa: E501 serverless (ServerlessSpec): [optional] # noqa: E501 """ diff --git a/pinecone/core/openapi/db_control/model/index_spec.py b/pinecone/core/openapi/db_control/model/index_spec.py index cee103f2c..9e0cc24bc 100644 --- a/pinecone/core/openapi/db_control/model/index_spec.py +++ b/pinecone/core/openapi/db_control/model/index_spec.py @@ -28,11 +28,11 @@ def lazy_import(): - from pinecone.core.openapi.db_control.model.dedicated_spec import DedicatedSpec + from pinecone.core.openapi.db_control.model.byoc_spec import ByocSpec from pinecone.core.openapi.db_control.model.pod_spec import PodSpec from pinecone.core.openapi.db_control.model.serverless_spec import ServerlessSpec - globals()["DedicatedSpec"] = DedicatedSpec + globals()["ByocSpec"] = ByocSpec globals()["PodSpec"] = PodSpec globals()["ServerlessSpec"] = ServerlessSpec @@ -91,7 +91,7 @@ def openapi_types(cls): return { "serverless": (ServerlessSpec,), # noqa: E501 "pod": (PodSpec,), # noqa: E501 - "dedicated": (DedicatedSpec,), # noqa: E501 + "byoc": (ByocSpec,), # noqa: E501 } @cached_class_property @@ -101,7 +101,7 @@ def discriminator(cls): attribute_map: Dict[str, str] = { "serverless": "serverless", # noqa: E501 "pod": "pod", # noqa: E501 - "dedicated": "dedicated", # noqa: E501 + "byoc": "byoc", # noqa: E501 } read_only_vars: Set[str] = set([]) @@ -146,7 +146,7 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 _visited_composed_classes = (Animal,) serverless (ServerlessSpec): [optional] # noqa: E501 pod (PodSpec): [optional] # noqa: E501 - dedicated (DedicatedSpec): [optional] # noqa: E501 + byoc (ByocSpec): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -232,7 +232,7 @@ def __init__(self, *args, **kwargs) -> None: # noqa: E501 _visited_composed_classes = (Animal,) serverless (ServerlessSpec): [optional] # noqa: E501 pod (PodSpec): [optional] # noqa: E501 - dedicated (DedicatedSpec): [optional] # noqa: E501 + byoc (ByocSpec): [optional] # 
noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/pinecone/core/openapi/db_control/models/__init__.py b/pinecone/core/openapi/db_control/models/__init__.py index 59b93661c..99c3bb9df 100644 --- a/pinecone/core/openapi/db_control/models/__init__.py +++ b/pinecone/core/openapi/db_control/models/__init__.py @@ -11,6 +11,7 @@ from pinecone.core.openapi.db_control.model.backup_list import BackupList from pinecone.core.openapi.db_control.model.backup_model import BackupModel +from pinecone.core.openapi.db_control.model.byoc_spec import ByocSpec from pinecone.core.openapi.db_control.model.collection_list import CollectionList from pinecone.core.openapi.db_control.model.collection_model import CollectionModel from pinecone.core.openapi.db_control.model.configure_index_request import ConfigureIndexRequest @@ -34,8 +35,10 @@ from pinecone.core.openapi.db_control.model.create_index_from_backup_request import ( CreateIndexFromBackupRequest, ) +from pinecone.core.openapi.db_control.model.create_index_from_backup_response import ( + CreateIndexFromBackupResponse, +) from pinecone.core.openapi.db_control.model.create_index_request import CreateIndexRequest -from pinecone.core.openapi.db_control.model.dedicated_spec import DedicatedSpec from pinecone.core.openapi.db_control.model.deletion_protection import DeletionProtection from pinecone.core.openapi.db_control.model.error_response import ErrorResponse from pinecone.core.openapi.db_control.model.error_response_error import ErrorResponseError diff --git a/pinecone/core/openapi/db_data/api/bulk_operations_api.py b/pinecone/core/openapi/db_data/api/bulk_operations_api.py index c0be95433..854e37af8 100644 --- a/pinecone/core/openapi/db_data/api/bulk_operations_api.py +++ b/pinecone/core/openapi/db_data/api/bulk_operations_api.py @@ -44,7 +44,7 @@ def __init__(self, api_client=None) -> None: def __cancel_bulk_import(self, id, **kwargs: ExtraOpenApiKwargsTypedDict): """Cancel an import # noqa: E501 - Cancel an 
import operation if it is not yet finished. It has no effect if the operation is already finished. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Cancel an import operation if it is not yet finished. It has no effect if the operation is already finished. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -113,7 +113,7 @@ def __cancel_bulk_import(self, id, **kwargs: ExtraOpenApiKwargsTypedDict): def __describe_bulk_import(self, id, **kwargs: ExtraOpenApiKwargsTypedDict): """Describe an import # noqa: E501 - Return details of a specific import operation. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Return details of a specific import operation. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -182,7 +182,7 @@ def __describe_bulk_import(self, id, **kwargs: ExtraOpenApiKwargsTypedDict): def __list_bulk_imports(self, **kwargs: ExtraOpenApiKwargsTypedDict): """List imports # noqa: E501 - List all recent and ongoing import operations. By default, `list_imports` returns up to 100 imports per page. If the `limit` parameter is set, `list` returns up to that number of imports instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of imports. When the response does not include a `pagination_token`, there are no more imports to return. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + List all recent and ongoing import operations. 
By default, `list_imports` returns up to 100 imports per page. If the `limit` parameter is set, `list` returns up to that number of imports instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of imports. When the response does not include a `pagination_token`, there are no more imports to return. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -250,7 +250,7 @@ def __list_bulk_imports(self, **kwargs: ExtraOpenApiKwargsTypedDict): def __start_bulk_import(self, start_import_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Start import # noqa: E501 - Start an asynchronous import of vectors from object storage into an index. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Start an asynchronous import of vectors from object storage into an index. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -331,7 +331,7 @@ def __init__(self, api_client=None) -> None: async def __cancel_bulk_import(self, id, **kwargs): """Cancel an import # noqa: E501 - Cancel an import operation if it is not yet finished. It has no effect if the operation is already finished. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Cancel an import operation if it is not yet finished. It has no effect if the operation is already finished. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). 
# noqa: E501 Args: @@ -393,7 +393,7 @@ async def __cancel_bulk_import(self, id, **kwargs): async def __describe_bulk_import(self, id, **kwargs): """Describe an import # noqa: E501 - Return details of a specific import operation. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Return details of a specific import operation. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 Args: @@ -455,7 +455,7 @@ async def __describe_bulk_import(self, id, **kwargs): async def __list_bulk_imports(self, **kwargs): """List imports # noqa: E501 - List all recent and ongoing import operations. By default, `list_imports` returns up to 100 imports per page. If the `limit` parameter is set, `list` returns up to that number of imports instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of imports. When the response does not include a `pagination_token`, there are no more imports to return. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + List all recent and ongoing import operations. By default, `list_imports` returns up to 100 imports per page. If the `limit` parameter is set, `list` returns up to that number of imports instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of imports. When the response does not include a `pagination_token`, there are no more imports to return. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 @@ -516,7 +516,7 @@ async def __list_bulk_imports(self, **kwargs): async def __start_bulk_import(self, start_import_request, **kwargs): """Start import # noqa: E501 - Start an asynchronous import of vectors from object storage into an index. 
For guidance and examples, see [Import data](https://docs.pinecone.io/guides/data/import-data). # noqa: E501 + Start an asynchronous import of vectors from object storage into an index. For guidance and examples, see [Import data](https://docs.pinecone.io/guides/index-data/import-data). # noqa: E501 Args: diff --git a/pinecone/core/openapi/db_data/api/namespace_operations_api.py b/pinecone/core/openapi/db_data/api/namespace_operations_api.py new file mode 100644 index 000000000..986efbb21 --- /dev/null +++ b/pinecone/core/openapi/db_data/api/namespace_operations_api.py @@ -0,0 +1,443 @@ +""" +Pinecone Data Plane API + +Pinecone is a vector database that makes it easy to search and retrieve billions of high-dimensional vectors. # noqa: E501 + +This file is @generated using OpenAPI. + +The version of the OpenAPI document: 2025-04 +Contact: support@pinecone.io +""" + +from pinecone.openapi_support import ApiClient, AsyncioApiClient +from pinecone.openapi_support.endpoint_utils import ( + ExtraOpenApiKwargsTypedDict, + KwargsWithOpenApiKwargDefaultsTypedDict, +) +from pinecone.openapi_support.endpoint import Endpoint as _Endpoint, ExtraOpenApiKwargsTypedDict +from pinecone.openapi_support.asyncio_endpoint import AsyncioEndpoint as _AsyncioEndpoint +from pinecone.openapi_support.model_utils import ( # noqa: F401 + date, + datetime, + file_type, + none_type, + validate_and_convert_types, +) +from pinecone.core.openapi.db_data.model.list_namespaces_response import ListNamespacesResponse +from pinecone.core.openapi.db_data.model.namespace_description import NamespaceDescription +from pinecone.core.openapi.db_data.model.rpc_status import RpcStatus + + +class NamespaceOperationsApi: + """NOTE: This class is @generated using OpenAPI. + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient() + self.api_client = api_client + + def __delete_namespace(self, namespace, **kwargs: ExtraOpenApiKwargsTypedDict): + """Delete a namespace # noqa: E501 + + Delete a namespace from an index. # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.delete_namespace(namespace, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): The namespace to delete + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + async_req (bool): execute request asynchronously + + Returns: + {str: (bool, dict, float, int, list, str, none_type)} + If the method is called asynchronously, returns the request + thread. 
+ """ + kwargs = self._process_openapi_kwargs(kwargs) + kwargs["namespace"] = namespace + return self.call_with_http_info(**kwargs) + + self.delete_namespace = _Endpoint( + settings={ + "response_type": ({str: (bool, dict, float, int, list, str, none_type)},), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces/{namespace}", + "operation_id": "delete_namespace", + "http_method": "DELETE", + "servers": None, + }, + params_map={ + "all": ["namespace"], + "required": ["namespace"], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"namespace": (str,)}, + "attribute_map": {"namespace": "namespace"}, + "location_map": {"namespace": "path"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__delete_namespace, + ) + + def __describe_namespace(self, namespace, **kwargs: ExtraOpenApiKwargsTypedDict): + """Describe a namespace # noqa: E501 + + Describe a namespace within an index, showing the vector count within the namespace. # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.describe_namespace(namespace, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): The namespace to describe + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. 
+ Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + async_req (bool): execute request asynchronously + + Returns: + NamespaceDescription + If the method is called asynchronously, returns the request + thread. + """ + kwargs = self._process_openapi_kwargs(kwargs) + kwargs["namespace"] = namespace + return self.call_with_http_info(**kwargs) + + self.describe_namespace = _Endpoint( + settings={ + "response_type": (NamespaceDescription,), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces/{namespace}", + "operation_id": "describe_namespace", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": ["namespace"], + "required": ["namespace"], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"namespace": (str,)}, + "attribute_map": {"namespace": "namespace"}, + "location_map": {"namespace": "path"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__describe_namespace, + ) + + def __list_namespaces(self, **kwargs: ExtraOpenApiKwargsTypedDict): + """Get list of all namespaces # noqa: E501 + + Get a list of all namespaces within an index. # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.list_namespaces(async_req=True) + >>> result = thread.get() + + + Keyword Args: + limit (int): Max number namespaces to return per page. [optional] + pagination_token (str): Pagination token to continue a previous listing operation. [optional] + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. 
+ _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + async_req (bool): execute request asynchronously + + Returns: + ListNamespacesResponse + If the method is called asynchronously, returns the request + thread. + """ + kwargs = self._process_openapi_kwargs(kwargs) + return self.call_with_http_info(**kwargs) + + self.list_namespaces = _Endpoint( + settings={ + "response_type": (ListNamespacesResponse,), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces", + "operation_id": "list_namespaces", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": ["limit", "pagination_token"], + "required": [], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"limit": (int,), "pagination_token": (str,)}, + "attribute_map": {"limit": "limit", "pagination_token": "paginationToken"}, + "location_map": {"limit": "query", "pagination_token": "query"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__list_namespaces, + ) + + +class AsyncioNamespaceOperationsApi: + """NOTE: This class is @generated using OpenAPI + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = AsyncioApiClient() + self.api_client = api_client + + async def __delete_namespace(self, namespace, **kwargs): + """Delete a namespace # noqa: E501 + + Delete a namespace from an index. 
# noqa: E501 + + + Args: + namespace (str): The namespace to delete + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + + Returns: + {str: (bool, dict, float, int, list, str, none_type)} + """ + self._process_openapi_kwargs(kwargs) + kwargs["namespace"] = namespace + return await self.call_with_http_info(**kwargs) + + self.delete_namespace = _AsyncioEndpoint( + settings={ + "response_type": ({str: (bool, dict, float, int, list, str, none_type)},), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces/{namespace}", + "operation_id": "delete_namespace", + "http_method": "DELETE", + "servers": None, + }, + params_map={ + "all": ["namespace"], + "required": ["namespace"], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"namespace": (str,)}, + "attribute_map": {"namespace": "namespace"}, + "location_map": {"namespace": "path"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__delete_namespace, + ) + + async def __describe_namespace(self, namespace, **kwargs): + """Describe a namespace # noqa: E501 + + Describe a namespace within an index, showing the vector count within the namespace. 
# noqa: E501 + + + Args: + namespace (str): The namespace to describe + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + + Returns: + NamespaceDescription + """ + self._process_openapi_kwargs(kwargs) + kwargs["namespace"] = namespace + return await self.call_with_http_info(**kwargs) + + self.describe_namespace = _AsyncioEndpoint( + settings={ + "response_type": (NamespaceDescription,), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces/{namespace}", + "operation_id": "describe_namespace", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": ["namespace"], + "required": ["namespace"], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"namespace": (str,)}, + "attribute_map": {"namespace": "namespace"}, + "location_map": {"namespace": "path"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__describe_namespace, + ) + + async def __list_namespaces(self, **kwargs): + """Get list of all namespaces # noqa: E501 + + Get a list of all namespaces within an index. # noqa: E501 + + + + Keyword Args: + limit (int): Max number namespaces to return per page. 
[optional] + pagination_token (str): Pagination token to continue a previous listing operation. [optional] + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + + Returns: + ListNamespacesResponse + """ + self._process_openapi_kwargs(kwargs) + return await self.call_with_http_info(**kwargs) + + self.list_namespaces = _AsyncioEndpoint( + settings={ + "response_type": (ListNamespacesResponse,), + "auth": ["ApiKeyAuth"], + "endpoint_path": "/namespaces", + "operation_id": "list_namespaces", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": ["limit", "pagination_token"], + "required": [], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {"limit": (int,), "pagination_token": (str,)}, + "attribute_map": {"limit": "limit", "pagination_token": "paginationToken"}, + "location_map": {"limit": "query", "pagination_token": "query"}, + "collection_format_map": {}, + }, + headers_map={"accept": ["application/json"], "content_type": []}, + api_client=api_client, + callable=__list_namespaces, + ) diff --git a/pinecone/core/openapi/db_data/api/vector_operations_api.py b/pinecone/core/openapi/db_data/api/vector_operations_api.py index a5a3c70e5..25ad63c5e 100644 --- 
a/pinecone/core/openapi/db_data/api/vector_operations_api.py +++ b/pinecone/core/openapi/db_data/api/vector_operations_api.py @@ -55,7 +55,7 @@ def __init__(self, api_client=None) -> None: def __delete_vectors(self, delete_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Delete vectors # noqa: E501 - Delete vectors by id from a single namespace. For guidance and examples, see [Delete data](https://docs.pinecone.io/guides/data/delete-data). # noqa: E501 + Delete vectors by id from a single namespace. For guidance and examples, see [Delete data](https://docs.pinecone.io/guides/manage-data/delete-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -195,7 +195,7 @@ def __describe_index_stats( def __fetch_vectors(self, ids, **kwargs: ExtraOpenApiKwargsTypedDict): """Fetch vectors # noqa: E501 - Look up and return vectors by ID from a single namespace. The returned vectors include the vector data and/or metadata. For guidance and examples, see [Fetch data](https://docs.pinecone.io/guides/data/fetch-data). # noqa: E501 + Look up and return vectors by ID from a single namespace. The returned vectors include the vector data and/or metadata. For guidance and examples, see [Fetch data](https://docs.pinecone.io/guides/manage-data/fetch-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -265,7 +265,7 @@ def __fetch_vectors(self, ids, **kwargs: ExtraOpenApiKwargsTypedDict): def __list_vectors(self, **kwargs: ExtraOpenApiKwargsTypedDict): """List vector IDs # noqa: E501 - List the IDs of vectors in a single namespace of a serverless index. An optional prefix can be passed to limit the results to IDs with a common prefix. Returns up to 100 IDs at a time by default in sorted order (bitwise \"C\" collation). If the `limit` parameter is set, `list` returns up to that number of IDs instead. 
Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of IDs. When the response does not include a `pagination_token`, there are no more IDs to return. For guidance and examples, see [List record IDs](https://docs.pinecone.io/guides/data/list-record-ids). **Note:** `list` is supported only for serverless indexes. # noqa: E501 + List the IDs of vectors in a single namespace of a serverless index. An optional prefix can be passed to limit the results to IDs with a common prefix. Returns up to 100 IDs at a time by default in sorted order (bitwise \"C\" collation). If the `limit` parameter is set, `list` returns up to that number of IDs instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of IDs. When the response does not include a `pagination_token`, there are no more IDs to return. For guidance and examples, see [List record IDs](https://docs.pinecone.io/guides/manage-data/list-record-ids). **Note:** `list` is supported only for serverless indexes. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -350,7 +350,7 @@ def __list_vectors(self, **kwargs: ExtraOpenApiKwargsTypedDict): def __query_vectors(self, query_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Search with a vector # noqa: E501 - Search a namespace using a query vector. It retrieves the ids of the most similar items in a namespace, along with their similarity scores. For guidance and examples, see [Query data](https://docs.pinecone.io/guides/data/query-data). # noqa: E501 + Search a namespace using a query vector. It retrieves the ids of the most similar items in a namespace, along with their similarity scores. For guidance and examples, see [Search](https://docs.pinecone.io/guides/search/semantic-search). 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -421,7 +421,7 @@ def __search_records_namespace( ): """Search with text # noqa: E501 - Search a namespace with a query text, query vector, or record ID and return the most similar records, along with their similarity scores. Optionally, rerank the initial results based on their relevance to the query. Searching with text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). Searching with a query vector or record ID is supported for all indexes. For guidance and examples, see [Query data](https://docs.pinecone.io/guides/data/query-data). # noqa: E501 + Search a namespace with a query text, query vector, or record ID and return the most similar records, along with their similarity scores. Optionally, rerank the initial results based on their relevance to the query. Searching with text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). Searching with a query vector or record ID is supported for all indexes. For guidance and examples, see [Search](https://docs.pinecone.io/guides/search/semantic-search). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -495,7 +495,7 @@ def __search_records_namespace( def __update_vector(self, update_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Update a vector # noqa: E501 - Update a vector in a namespace. If a value is included, it will overwrite the previous value. If a `set_metadata` is included, the values of the fields specified in it will be added or overwrite the previous value. For guidance and examples, see [Update data](https://docs.pinecone.io/guides/data/update-data). # noqa: E501 + Update a vector in a namespace. 
If a value is included, it will overwrite the previous value. If a `set_metadata` is included, the values of the fields specified in it will be added or overwrite the previous value. For guidance and examples, see [Update data](https://docs.pinecone.io/guides/manage-data/update-data). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -566,7 +566,7 @@ def __upsert_records_namespace( ): """Upsert text # noqa: E501 - Upsert text into a namespace. Pinecone converts the text to vectors automatically using the hosted embedding model associated with the index. Upserting text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/reference/api/2025-01/control-plane/create_for_model). For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/data/upsert-data#upsert-text). # noqa: E501 + Upsert text into a namespace. Pinecone converts the text to vectors automatically using the hosted embedding model associated with the index. Upserting text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/reference/api/2025-01/control-plane/create_for_model). For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/index-data/upsert-data#upsert-text). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -637,7 +637,7 @@ def __upsert_records_namespace( def __upsert_vectors(self, upsert_request, **kwargs: ExtraOpenApiKwargsTypedDict): """Upsert vectors # noqa: E501 - Upsert vectors into a namespace. If a new value is upserted for an existing vector ID, it will overwrite the previous value. For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/data/upsert-data#upsert-vectors). # noqa: E501 + Upsert vectors into a namespace. 
If a new value is upserted for an existing vector ID, it will overwrite the previous value. For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/index-data/upsert-data#upsert-vectors). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -718,7 +718,7 @@ def __init__(self, api_client=None) -> None: async def __delete_vectors(self, delete_request, **kwargs): """Delete vectors # noqa: E501 - Delete vectors by id from a single namespace. For guidance and examples, see [Delete data](https://docs.pinecone.io/guides/data/delete-data). # noqa: E501 + Delete vectors by id from a single namespace. For guidance and examples, see [Delete data](https://docs.pinecone.io/guides/manage-data/delete-data). # noqa: E501 Args: @@ -842,7 +842,7 @@ async def __describe_index_stats(self, describe_index_stats_request, **kwargs): async def __fetch_vectors(self, ids, **kwargs): """Fetch vectors # noqa: E501 - Look up and return vectors by ID from a single namespace. The returned vectors include the vector data and/or metadata. For guidance and examples, see [Fetch data](https://docs.pinecone.io/guides/data/fetch-data). # noqa: E501 + Look up and return vectors by ID from a single namespace. The returned vectors include the vector data and/or metadata. For guidance and examples, see [Fetch data](https://docs.pinecone.io/guides/manage-data/fetch-data). # noqa: E501 Args: @@ -905,7 +905,7 @@ async def __fetch_vectors(self, ids, **kwargs): async def __list_vectors(self, **kwargs): """List vector IDs # noqa: E501 - List the IDs of vectors in a single namespace of a serverless index. An optional prefix can be passed to limit the results to IDs with a common prefix. Returns up to 100 IDs at a time by default in sorted order (bitwise \"C\" collation). If the `limit` parameter is set, `list` returns up to that number of IDs instead. 
Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of IDs. When the response does not include a `pagination_token`, there are no more IDs to return. For guidance and examples, see [List record IDs](https://docs.pinecone.io/guides/data/list-record-ids). **Note:** `list` is supported only for serverless indexes. # noqa: E501 + List the IDs of vectors in a single namespace of a serverless index. An optional prefix can be passed to limit the results to IDs with a common prefix. Returns up to 100 IDs at a time by default in sorted order (bitwise \"C\" collation). If the `limit` parameter is set, `list` returns up to that number of IDs instead. Whenever there are additional IDs to return, the response also includes a `pagination_token` that you can use to get the next batch of IDs. When the response does not include a `pagination_token`, there are no more IDs to return. For guidance and examples, see [List record IDs](https://docs.pinecone.io/guides/manage-data/list-record-ids). **Note:** `list` is supported only for serverless indexes. # noqa: E501 @@ -983,7 +983,7 @@ async def __list_vectors(self, **kwargs): async def __query_vectors(self, query_request, **kwargs): """Search with a vector # noqa: E501 - Search a namespace using a query vector. It retrieves the ids of the most similar items in a namespace, along with their similarity scores. For guidance and examples, see [Query data](https://docs.pinecone.io/guides/data/query-data). # noqa: E501 + Search a namespace using a query vector. It retrieves the ids of the most similar items in a namespace, along with their similarity scores. For guidance and examples, see [Search](https://docs.pinecone.io/guides/search/semantic-search). 
# noqa: E501 Args: @@ -1045,7 +1045,7 @@ async def __query_vectors(self, query_request, **kwargs): async def __search_records_namespace(self, namespace, search_records_request, **kwargs): """Search with text # noqa: E501 - Search a namespace with a query text, query vector, or record ID and return the most similar records, along with their similarity scores. Optionally, rerank the initial results based on their relevance to the query. Searching with text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). Searching with a query vector or record ID is supported for all indexes. For guidance and examples, see [Query data](https://docs.pinecone.io/guides/data/query-data). # noqa: E501 + Search a namespace with a query text, query vector, or record ID and return the most similar records, along with their similarity scores. Optionally, rerank the initial results based on their relevance to the query. Searching with text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/guides/indexes/create-an-index#integrated-embedding). Searching with a query vector or record ID is supported for all indexes. For guidance and examples, see [Search](https://docs.pinecone.io/guides/search/semantic-search). # noqa: E501 Args: @@ -1112,7 +1112,7 @@ async def __search_records_namespace(self, namespace, search_records_request, ** async def __update_vector(self, update_request, **kwargs): """Update a vector # noqa: E501 - Update a vector in a namespace. If a value is included, it will overwrite the previous value. If a `set_metadata` is included, the values of the fields specified in it will be added or overwrite the previous value. For guidance and examples, see [Update data](https://docs.pinecone.io/guides/data/update-data). # noqa: E501 + Update a vector in a namespace. If a value is included, it will overwrite the previous value. 
If a `set_metadata` is included, the values of the fields specified in it will be added or overwrite the previous value. For guidance and examples, see [Update data](https://docs.pinecone.io/guides/manage-data/update-data). # noqa: E501 Args: @@ -1174,7 +1174,7 @@ async def __update_vector(self, update_request, **kwargs): async def __upsert_records_namespace(self, namespace, upsert_record, **kwargs): """Upsert text # noqa: E501 - Upsert text into a namespace. Pinecone converts the text to vectors automatically using the hosted embedding model associated with the index. Upserting text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/reference/api/2025-01/control-plane/create_for_model). For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/data/upsert-data#upsert-text). # noqa: E501 + Upsert text into a namespace. Pinecone converts the text to vectors automatically using the hosted embedding model associated with the index. Upserting text is supported only for [indexes with integrated embedding](https://docs.pinecone.io/reference/api/2025-01/control-plane/create_for_model). For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/index-data/upsert-data#upsert-text). # noqa: E501 Args: @@ -1238,7 +1238,7 @@ async def __upsert_records_namespace(self, namespace, upsert_record, **kwargs): async def __upsert_vectors(self, upsert_request, **kwargs): """Upsert vectors # noqa: E501 - Upsert vectors into a namespace. If a new value is upserted for an existing vector ID, it will overwrite the previous value. For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/data/upsert-data#upsert-vectors). # noqa: E501 + Upsert vectors into a namespace. If a new value is upserted for an existing vector ID, it will overwrite the previous value. For guidance and examples, see [Upsert data](https://docs.pinecone.io/guides/index-data/upsert-data#upsert-vectors). 
# noqa: E501 Args: diff --git a/pinecone/core/openapi/db_data/apis/__init__.py b/pinecone/core/openapi/db_data/apis/__init__.py index 3d7112fdc..a5caa981e 100644 --- a/pinecone/core/openapi/db_data/apis/__init__.py +++ b/pinecone/core/openapi/db_data/apis/__init__.py @@ -14,4 +14,5 @@ # Import APIs into API package: from pinecone.core.openapi.db_data.api.bulk_operations_api import BulkOperationsApi +from pinecone.core.openapi.db_data.api.namespace_operations_api import NamespaceOperationsApi from pinecone.core.openapi.db_data.api.vector_operations_api import VectorOperationsApi diff --git a/pinecone/core/openapi/db_data/model/delete_request.py b/pinecone/core/openapi/db_data/model/delete_request.py index bfb15b874..488554586 100644 --- a/pinecone/core/openapi/db_data/model/delete_request.py +++ b/pinecone/core/openapi/db_data/model/delete_request.py @@ -144,7 +144,7 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 ids ([str]): Vectors to delete. [optional] # noqa: E501 delete_all (bool): This indicates that all vectors in the index namespace should be deleted. [optional] if omitted the server will use the default value of False. # noqa: E501 namespace (str): The namespace to delete vectors from, if applicable. [optional] # noqa: E501 - filter ({str: (bool, dict, float, int, list, str, none_type)}): If specified, the metadata filter here will be used to select the vectors to delete. This is mutually exclusive with specifying ids to delete in the ids param or using delete_all=True. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). Serverless indexes do not support delete by metadata. Instead, you can use the `list` operation to fetch the vector IDs based on their common ID prefix and then delete the records by ID. [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): If specified, the metadata filter here will be used to select the vectors to delete. 
This is mutually exclusive with specifying ids to delete in the ids param or using delete_all=True. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). Serverless indexes do not support delete by metadata. Instead, you can use the `list` operation to fetch the vector IDs based on their common ID prefix and then delete the records by ID. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -231,7 +231,7 @@ def __init__(self, *args, **kwargs) -> None: # noqa: E501 ids ([str]): Vectors to delete. [optional] # noqa: E501 delete_all (bool): This indicates that all vectors in the index namespace should be deleted. [optional] if omitted the server will use the default value of False. # noqa: E501 namespace (str): The namespace to delete vectors from, if applicable. [optional] # noqa: E501 - filter ({str: (bool, dict, float, int, list, str, none_type)}): If specified, the metadata filter here will be used to select the vectors to delete. This is mutually exclusive with specifying ids to delete in the ids param or using delete_all=True. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). Serverless indexes do not support delete by metadata. Instead, you can use the `list` operation to fetch the vector IDs based on their common ID prefix and then delete the records by ID. [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): If specified, the metadata filter here will be used to select the vectors to delete. This is mutually exclusive with specifying ids to delete in the ids param or using delete_all=True. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). Serverless indexes do not support delete by metadata. Instead, you can use the `list` operation to fetch the vector IDs based on their common ID prefix and then delete the records by ID. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/pinecone/core/openapi/db_data/model/describe_index_stats_request.py b/pinecone/core/openapi/db_data/model/describe_index_stats_request.py index e024d5f33..3ea3bb6d9 100644 --- a/pinecone/core/openapi/db_data/model/describe_index_stats_request.py +++ b/pinecone/core/openapi/db_data/model/describe_index_stats_request.py @@ -135,7 +135,7 @@ def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - filter ({str: (bool, dict, float, int, list, str, none_type)}): If this parameter is present, the operation only returns statistics for vectors that satisfy the filter. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). Serverless indexes do not support filtering `describe_index_stats` by metadata. [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): If this parameter is present, the operation only returns statistics for vectors that satisfy the filter. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). Serverless indexes do not support filtering `describe_index_stats` by metadata. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -219,7 +219,7 @@ def __init__(self, *args, **kwargs) -> None: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - filter ({str: (bool, dict, float, int, list, str, none_type)}): If this parameter is present, the operation only returns statistics for vectors that satisfy the filter. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). Serverless indexes do not support filtering `describe_index_stats` by metadata. 
[optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): If this parameter is present, the operation only returns statistics for vectors that satisfy the filter. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). Serverless indexes do not support filtering `describe_index_stats` by metadata. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/pinecone/core/openapi/db_data/model/list_namespaces_response.py b/pinecone/core/openapi/db_data/model/list_namespaces_response.py new file mode 100644 index 000000000..3d75a7279 --- /dev/null +++ b/pinecone/core/openapi/db_data/model/list_namespaces_response.py @@ -0,0 +1,274 @@ +""" +Pinecone Data Plane API + +Pinecone is a vector database that makes it easy to search and retrieve billions of high-dimensional vectors. # noqa: E501 + +This file is @generated using OpenAPI. + +The version of the OpenAPI document: 2025-04 +Contact: support@pinecone.io +""" + +from pinecone.openapi_support.model_utils import ( # noqa: F401 + PineconeApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + OpenApiModel, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, +) +from pinecone.openapi_support.exceptions import PineconeApiAttributeError + + +def lazy_import(): + from pinecone.core.openapi.db_data.model.namespace_description import NamespaceDescription + from pinecone.core.openapi.db_data.model.pagination import Pagination + + globals()["NamespaceDescription"] = NamespaceDescription + globals()["Pagination"] = Pagination + + +from typing import Dict, Literal, Tuple, Set, Any, Type, TypeVar +from pinecone.openapi_support import PropertyValidationTypedDict, cached_class_property + +T = TypeVar("T", bound="ListNamespacesResponse") + + +class ListNamespacesResponse(ModelNormal): + """NOTE: This class is @generated 
using OpenAPI. + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + _data_store: Dict[str, Any] + _check_type: bool + + allowed_values: Dict[Tuple[str, ...], Dict[str, Any]] = {} + + validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = {} + + @cached_class_property + def additional_properties_type(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return (bool, dict, float, int, list, str, none_type) # noqa: E501 + + _nullable = False + + @cached_class_property + def openapi_types(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + lazy_import() + return { + "namespaces": ([NamespaceDescription],), # noqa: E501 + "pagination": (Pagination,), # noqa: E501 + } + + @cached_class_property + def discriminator(cls): + return None + + attribute_map: Dict[str, str] = { + "namespaces": "namespaces", # noqa: E501 + "pagination": "pagination", # noqa: E501 + } + + read_only_vars: Set[str] = set([]) + + _composed_schemas: Dict[Literal["allOf", "oneOf", "anyOf"], Any] = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 + """ListNamespacesResponse - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + namespaces ([NamespaceDescription]): The list of namespaces belonging to this index. [optional] # noqa: E501 + pagination (Pagination): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs) -> None: # noqa: E501 + """ListNamespacesResponse - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + namespaces ([NamespaceDescription]): The list of namespaces belonging to this index. 
[optional] # noqa: E501 + pagination (Pagination): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise PineconeApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + f"class with read only attributes." + ) diff --git a/pinecone/core/openapi/db_data/model/namespace_description.py b/pinecone/core/openapi/db_data/model/namespace_description.py new file mode 100644 index 000000000..099b14b2f --- /dev/null +++ b/pinecone/core/openapi/db_data/model/namespace_description.py @@ -0,0 +1,264 @@ +""" +Pinecone Data Plane API + +Pinecone is a vector database that makes it easy to search and retrieve billions of high-dimensional vectors. # noqa: E501 + +This file is @generated using OpenAPI. 
+ +The version of the OpenAPI document: 2025-04 +Contact: support@pinecone.io +""" + +from pinecone.openapi_support.model_utils import ( # noqa: F401 + PineconeApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + OpenApiModel, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, +) +from pinecone.openapi_support.exceptions import PineconeApiAttributeError + + +from typing import Dict, Literal, Tuple, Set, Any, Type, TypeVar +from pinecone.openapi_support import PropertyValidationTypedDict, cached_class_property + +T = TypeVar("T", bound="NamespaceDescription") + + +class NamespaceDescription(ModelNormal): + """NOTE: This class is @generated using OpenAPI. + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + _data_store: Dict[str, Any] + _check_type: bool + + allowed_values: Dict[Tuple[str, ...], Dict[str, Any]] = {} + + validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = {} + + @cached_class_property + def additional_properties_type(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return (bool, dict, float, int, list, str, none_type) # noqa: E501 + + _nullable = False + + @cached_class_property + def openapi_types(cls): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "name": (str,), # noqa: E501 + "record_count": (int,), # noqa: E501 + } + + @cached_class_property + def discriminator(cls): + return None + + attribute_map: Dict[str, str] = { + "name": "name", # noqa: E501 + "record_count": "record_count", # noqa: E501 + } + + read_only_vars: Set[str] = set([]) + + _composed_schemas: Dict[Literal["allOf", "oneOf", "anyOf"], Any] = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls: Type[T], *args, **kwargs) -> T: # noqa: E501 + """NamespaceDescription - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. 
snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + name (str): The name of the namespace. [optional] # noqa: E501 + record_count (int): The total amount of records within the namespace. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs) -> None: # noqa: E501 + """NamespaceDescription - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. 
+ When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + name (str): The name of the namespace. [optional] # noqa: E501 + record_count (int): The total amount of records within the namespace. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise PineconeApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % (args, self.__class__.__name__), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise PineconeApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + f"class with read only attributes." 
+ ) diff --git a/pinecone/core/openapi/db_data/model/query_request.py b/pinecone/core/openapi/db_data/model/query_request.py index 7aa460c03..f1aaf07e5 100644 --- a/pinecone/core/openapi/db_data/model/query_request.py +++ b/pinecone/core/openapi/db_data/model/query_request.py @@ -170,7 +170,7 @@ def _from_openapi_data(cls: Type[T], top_k, *args, **kwargs) -> T: # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) namespace (str): The namespace to query. [optional] # noqa: E501 - filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). [optional] # noqa: E501 include_values (bool): Indicates whether vector values are included in the response. [optional] if omitted the server will use the default value of False. # noqa: E501 include_metadata (bool): Indicates whether metadata is included in the response as well as the ids. [optional] if omitted the server will use the default value of False. # noqa: E501 queries ([QueryVector]): DEPRECATED. Use `vector` or `id` instead. [optional] # noqa: E501 @@ -265,7 +265,7 @@ def __init__(self, top_k, *args, **kwargs) -> None: # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) namespace (str): The namespace to query. 
[optional] # noqa: E501 - filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). [optional] # noqa: E501 include_values (bool): Indicates whether vector values are included in the response. [optional] if omitted the server will use the default value of False. # noqa: E501 include_metadata (bool): Indicates whether metadata is included in the response as well as the ids. [optional] if omitted the server will use the default value of False. # noqa: E501 queries ([QueryVector]): DEPRECATED. Use `vector` or `id` instead. [optional] # noqa: E501 diff --git a/pinecone/core/openapi/db_data/model/search_records_request_query.py b/pinecone/core/openapi/db_data/model/search_records_request_query.py index 6898e2006..caa7ee403 100644 --- a/pinecone/core/openapi/db_data/model/search_records_request_query.py +++ b/pinecone/core/openapi/db_data/model/search_records_request_query.py @@ -154,7 +154,7 @@ def _from_openapi_data(cls: Type[T], top_k, *args, **kwargs) -> T: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. 
See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). [optional] # noqa: E501 inputs ({str: (bool, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 vector (SearchRecordsVector): [optional] # noqa: E501 id (str): The unique ID of the vector to be used as a query vector. [optional] # noqa: E501 @@ -245,7 +245,7 @@ def __init__(self, top_k, *args, **kwargs) -> None: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/data/understanding-metadata). [optional] # noqa: E501 + filter ({str: (bool, dict, float, int, list, str, none_type)}): The filter to apply. You can use vector metadata to limit your search. See [Understanding metadata](https://docs.pinecone.io/guides/index-data/indexing-overview#metadata). [optional] # noqa: E501 inputs ({str: (bool, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 vector (SearchRecordsVector): [optional] # noqa: E501 id (str): The unique ID of the vector to be used as a query vector. 
[optional] # noqa: E501 diff --git a/pinecone/core/openapi/db_data/model/search_records_request_rerank.py b/pinecone/core/openapi/db_data/model/search_records_request_rerank.py index 3138c6014..2684894a6 100644 --- a/pinecone/core/openapi/db_data/model/search_records_request_rerank.py +++ b/pinecone/core/openapi/db_data/model/search_records_request_rerank.py @@ -113,8 +113,8 @@ def _from_openapi_data(cls: Type[T], model, rank_fields, *args, **kwargs) -> T: """SearchRecordsRequestRerank - a model defined in OpenAPI Args: - model (str): The name of the [reranking model](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) to use. - rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models). + model (str): The name of the [reranking model](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) to use. + rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/search/rerank-results#reranking-models). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -148,7 +148,7 @@ def _from_openapi_data(cls: Type[T], model, rank_fields, *args, **kwargs) -> T: through its discriminator because we passed in _visited_composed_classes = (Animal,) top_n (int): The number of top results to return after reranking. Defaults to top_k. [optional] # noqa: E501 - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) for available model parameters. 
[optional] # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) for available model parameters. [optional] # noqa: E501 query (str): The query to rerank documents against. If a specific rerank query is specified, it overwrites the query input that was provided at the top level. [optional] # noqa: E501 """ @@ -205,8 +205,8 @@ def __init__(self, model, rank_fields, *args, **kwargs) -> None: # noqa: E501 """SearchRecordsRequestRerank - a model defined in OpenAPI Args: - model (str): The name of the [reranking model](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) to use. - rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models). + model (str): The name of the [reranking model](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) to use. + rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/search/rerank-results#reranking-models). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -240,7 +240,7 @@ def __init__(self, model, rank_fields, *args, **kwargs) -> None: # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) top_n (int): The number of top results to return after reranking. Defaults to top_k. [optional] # noqa: E501 - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. 
Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) for available model parameters. [optional] # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) for available model parameters. [optional] # noqa: E501 query (str): The query to rerank documents against. If a specific rerank query is specified, it overwrites the query input that was provided at the top level. [optional] # noqa: E501 """ diff --git a/pinecone/core/openapi/db_data/model/start_import_request.py b/pinecone/core/openapi/db_data/model/start_import_request.py index 1a4a96298..e08290479 100644 --- a/pinecone/core/openapi/db_data/model/start_import_request.py +++ b/pinecone/core/openapi/db_data/model/start_import_request.py @@ -120,7 +120,7 @@ def _from_openapi_data(cls: Type[T], uri, *args, **kwargs) -> T: # noqa: E501 """StartImportRequest - a model defined in OpenAPI Args: - uri (str): The [URI prefix](https://docs.pinecone.io/guides/data/understanding-imports#directory-structure) under which the data to import is available. All data within this prefix will be listed then imported into the target index. Currently only `s3://` URIs are supported. + uri (str): The [URI prefix](https://docs.pinecone.io/guides/index-data/import-data#prepare-your-data) under which the data to import is available. All data within this prefix will be listed then imported into the target index. Currently only `s3://` URIs are supported. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -209,7 +209,7 @@ def __init__(self, uri, *args, **kwargs) -> None: # noqa: E501 """StartImportRequest - a model defined in OpenAPI Args: - uri (str): The [URI prefix](https://docs.pinecone.io/guides/data/understanding-imports#directory-structure) under which the data to import is available. 
All data within this prefix will be listed then imported into the target index. Currently only `s3://` URIs are supported. + uri (str): The [URI prefix](https://docs.pinecone.io/guides/index-data/import-data#prepare-your-data) under which the data to import is available. All data within this prefix will be listed then imported into the target index. Currently only `s3://` URIs are supported. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types diff --git a/pinecone/core/openapi/db_data/models/__init__.py b/pinecone/core/openapi/db_data/models/__init__.py index b5fa9fa0e..34e9a6d88 100644 --- a/pinecone/core/openapi/db_data/models/__init__.py +++ b/pinecone/core/openapi/db_data/models/__init__.py @@ -20,7 +20,9 @@ from pinecone.core.openapi.db_data.model.index_description import IndexDescription from pinecone.core.openapi.db_data.model.list_imports_response import ListImportsResponse from pinecone.core.openapi.db_data.model.list_item import ListItem +from pinecone.core.openapi.db_data.model.list_namespaces_response import ListNamespacesResponse from pinecone.core.openapi.db_data.model.list_response import ListResponse +from pinecone.core.openapi.db_data.model.namespace_description import NamespaceDescription from pinecone.core.openapi.db_data.model.namespace_summary import NamespaceSummary from pinecone.core.openapi.db_data.model.pagination import Pagination from pinecone.core.openapi.db_data.model.protobuf_any import ProtobufAny diff --git a/pinecone/core/openapi/inference/api/inference_api.py b/pinecone/core/openapi/inference/api/inference_api.py index 3c9ec25be..a19557f82 100644 --- a/pinecone/core/openapi/inference/api/inference_api.py +++ b/pinecone/core/openapi/inference/api/inference_api.py @@ -46,7 +46,7 @@ def __init__(self, api_client=None) -> None: def __embed(self, **kwargs: ExtraOpenApiKwargsTypedDict): """Generate vectors # noqa: E501 - Generate vector embeddings for input data. 
This endpoint uses [Pinecone Inference](https://docs.pinecone.io/guides/inference/understanding-inference). For guidance and examples, see [Embed data](https://docs.pinecone.io/guides/inference/generate-embeddings). # noqa: E501 + Generate vector embeddings for input data. This endpoint uses [Pinecone Inference](https://docs.pinecone.io/guides/index-data/indexing-overview#vector-embedding). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -253,7 +253,7 @@ def __list_models(self, **kwargs: ExtraOpenApiKwargsTypedDict): def __rerank(self, **kwargs: ExtraOpenApiKwargsTypedDict): """Rerank documents # noqa: E501 - Rerank documents according to their relevance to a query. For guidance and examples, see [Rerank documents](https://docs.pinecone.io/guides/inference/rerank). # noqa: E501 + Rerank documents according to their relevance to a query. For guidance and examples, see [Rerank results](https://docs.pinecone.io/guides/search/rerank-results). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -332,7 +332,7 @@ def __init__(self, api_client=None) -> None: async def __embed(self, **kwargs): """Generate vectors # noqa: E501 - Generate vector embeddings for input data. This endpoint uses [Pinecone Inference](https://docs.pinecone.io/guides/inference/understanding-inference). For guidance and examples, see [Embed data](https://docs.pinecone.io/guides/inference/generate-embeddings). # noqa: E501 + Generate vector embeddings for input data. This endpoint uses [Pinecone Inference](https://docs.pinecone.io/guides/index-data/indexing-overview#vector-embedding). # noqa: E501 @@ -518,7 +518,7 @@ async def __list_models(self, **kwargs): async def __rerank(self, **kwargs): """Rerank documents # noqa: E501 - Rerank documents according to their relevance to a query. 
For guidance and examples, see [Rerank documents](https://docs.pinecone.io/guides/inference/rerank). # noqa: E501 + Rerank documents according to their relevance to a query. For guidance and examples, see [Rerank results](https://docs.pinecone.io/guides/search/rerank-results). # noqa: E501 diff --git a/pinecone/core/openapi/inference/model/embed_request.py b/pinecone/core/openapi/inference/model/embed_request.py index 58e312908..ba7e786b3 100644 --- a/pinecone/core/openapi/inference/model/embed_request.py +++ b/pinecone/core/openapi/inference/model/embed_request.py @@ -117,7 +117,7 @@ def _from_openapi_data(cls: Type[T], model, inputs, *args, **kwargs) -> T: # no """EmbedRequest - a model defined in OpenAPI Args: - model (str): The [model](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) to use for embedding generation. + model (str): The [model](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) to use for embedding generation. inputs ([EmbedRequestInputs]): List of inputs to generate embeddings for. Keyword Args: @@ -151,7 +151,7 @@ def _from_openapi_data(cls: Type[T], model, inputs, *args, **kwargs) -> T: # no Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) for available model parameters. [optional] # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) for available model parameters. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -207,7 +207,7 @@ def __init__(self, model, inputs, *args, **kwargs) -> None: # noqa: E501 """EmbedRequest - a model defined in OpenAPI Args: - model (str): The [model](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) to use for embedding generation. + model (str): The [model](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) to use for embedding generation. inputs ([EmbedRequestInputs]): List of inputs to generate embeddings for. Keyword Args: @@ -241,7 +241,7 @@ def __init__(self, model, inputs, *args, **kwargs) -> None: # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#embedding-models) for available model parameters. [optional] # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/index-data/create-an-index#embedding-models) for available model parameters. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/pinecone/core/openapi/inference/model/model_info.py b/pinecone/core/openapi/inference/model/model_info.py index 2ce733cc0..6daa37b3f 100644 --- a/pinecone/core/openapi/inference/model/model_info.py +++ b/pinecone/core/openapi/inference/model/model_info.py @@ -77,9 +77,9 @@ class ModelInfo(ModelNormal): } validations: Dict[Tuple[str, ...], PropertyValidationTypedDict] = { - ("dimension",): {"inclusive_maximum": 20000, "inclusive_minimum": 1}, - ("sequence_length",): {"inclusive_minimum": 1}, - ("batch_size",): {"inclusive_minimum": 1}, + ("default_dimension",): {"inclusive_maximum": 20000, "inclusive_minimum": 1}, + ("max_sequence_length",): {"inclusive_minimum": 1}, + ("max_batch_size",): {"inclusive_minimum": 1}, } @cached_class_property @@ -105,15 +105,17 @@ def openapi_types(cls): """ lazy_import() return { - "name": (str,), # noqa: E501 + "model": (str,), # noqa: E501 "short_description": (str,), # noqa: E501 "type": (str,), # noqa: E501 "supported_parameters": ([ModelInfoSupportedParameter],), # noqa: E501 "vector_type": (str,), # noqa: E501 - "dimension": (int,), # noqa: E501 + "default_dimension": (int,), # noqa: E501 "modality": (str,), # noqa: E501 - "sequence_length": (int,), # noqa: E501 - "batch_size": (int,), # noqa: E501 + "max_sequence_length": (int,), # noqa: E501 + "max_batch_size": (int,), # noqa: E501 + "provider_name": (str,), # noqa: E501 + "supported_dimensions": ([int],), # noqa: E501 "supported_metrics": (ModelInfoSupportedMetrics,), # noqa: E501 } @@ -122,15 +124,17 @@ def discriminator(cls): return None attribute_map: Dict[str, str] = { - "name": "name", # noqa: E501 + "model": "model", # noqa: E501 "short_description": "short_description", # noqa: E501 "type": "type", # noqa: E501 "supported_parameters": "supported_parameters", # noqa: E501 "vector_type": "vector_type", # noqa: E501 - "dimension": "dimension", # noqa: E501 + "default_dimension": 
"default_dimension", # noqa: E501 "modality": "modality", # noqa: E501 - "sequence_length": "sequence_length", # noqa: E501 - "batch_size": "batch_size", # noqa: E501 + "max_sequence_length": "max_sequence_length", # noqa: E501 + "max_batch_size": "max_batch_size", # noqa: E501 + "provider_name": "provider_name", # noqa: E501 + "supported_dimensions": "supported_dimensions", # noqa: E501 "supported_metrics": "supported_metrics", # noqa: E501 } @@ -141,12 +145,12 @@ def discriminator(cls): @classmethod @convert_js_args_to_python_args def _from_openapi_data( - cls: Type[T], name, short_description, type, supported_parameters, *args, **kwargs + cls: Type[T], model, short_description, type, supported_parameters, *args, **kwargs ) -> T: # noqa: E501 """ModelInfo - a model defined in OpenAPI Args: - name (str): The name of the model. + model (str): The name of the model. short_description (str): A summary of the model. type (str): The type of model (e.g. 'embed' or 'rerank'). supported_parameters ([ModelInfoSupportedParameter]): @@ -183,10 +187,12 @@ def _from_openapi_data( through its discriminator because we passed in _visited_composed_classes = (Animal,) vector_type (str): Whether the embedding model produces 'dense' or 'sparse' embeddings. [optional] # noqa: E501 - dimension (int): The embedding model dimension (applies to dense embedding models only). [optional] # noqa: E501 + default_dimension (int): The default embedding model dimension (applies to dense embedding models only). [optional] # noqa: E501 modality (str): The modality of the model (e.g. 'text'). [optional] # noqa: E501 - sequence_length (int): The maximum tokens per sequence supported by the model. [optional] # noqa: E501 - batch_size (int): The maximum batch size (number of sequences) supported by the model. [optional] # noqa: E501 + max_sequence_length (int): The maximum tokens per sequence supported by the model. 
[optional] # noqa: E501 + max_batch_size (int): The maximum batch size (number of sequences) supported by the model. [optional] # noqa: E501 + provider_name (str): The name of the provider of the model. [optional] # noqa: E501 + supported_dimensions ([int]): The list of supported dimensions for the model (applies to dense embedding models only). [optional] # noqa: E501 supported_metrics (ModelInfoSupportedMetrics): [optional] # noqa: E501 """ @@ -213,7 +219,7 @@ def _from_openapi_data( self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.name = name + self.model = model self.short_description = short_description self.type = type self.supported_parameters = supported_parameters @@ -242,12 +248,12 @@ def _from_openapi_data( @convert_js_args_to_python_args def __init__( - self, name, short_description, type, supported_parameters, *args, **kwargs + self, model, short_description, type, supported_parameters, *args, **kwargs ) -> None: # noqa: E501 """ModelInfo - a model defined in OpenAPI Args: - name (str): The name of the model. + model (str): The name of the model. short_description (str): A summary of the model. type (str): The type of model (e.g. 'embed' or 'rerank'). supported_parameters ([ModelInfoSupportedParameter]): @@ -284,10 +290,12 @@ def __init__( through its discriminator because we passed in _visited_composed_classes = (Animal,) vector_type (str): Whether the embedding model produces 'dense' or 'sparse' embeddings. [optional] # noqa: E501 - dimension (int): The embedding model dimension (applies to dense embedding models only). [optional] # noqa: E501 + default_dimension (int): The default embedding model dimension (applies to dense embedding models only). [optional] # noqa: E501 modality (str): The modality of the model (e.g. 'text'). [optional] # noqa: E501 - sequence_length (int): The maximum tokens per sequence supported by the model. 
[optional] # noqa: E501 - batch_size (int): The maximum batch size (number of sequences) supported by the model. [optional] # noqa: E501 + max_sequence_length (int): The maximum tokens per sequence supported by the model. [optional] # noqa: E501 + max_batch_size (int): The maximum batch size (number of sequences) supported by the model. [optional] # noqa: E501 + provider_name (str): The name of the provider of the model. [optional] # noqa: E501 + supported_dimensions ([int]): The list of supported dimensions for the model (applies to dense embedding models only). [optional] # noqa: E501 supported_metrics (ModelInfoSupportedMetrics): [optional] # noqa: E501 """ @@ -312,7 +320,7 @@ def __init__( self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.name = name + self.model = model self.short_description = short_description self.type = type self.supported_parameters = supported_parameters diff --git a/pinecone/core/openapi/inference/model/rerank_request.py b/pinecone/core/openapi/inference/model/rerank_request.py index 5727a4f7c..d8ce884bf 100644 --- a/pinecone/core/openapi/inference/model/rerank_request.py +++ b/pinecone/core/openapi/inference/model/rerank_request.py @@ -125,7 +125,7 @@ def _from_openapi_data(cls: Type[T], model, query, documents, *args, **kwargs) - """RerankRequest - a model defined in OpenAPI Args: - model (str): The [model](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) to use for reranking. + model (str): The [model](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) to use for reranking. query (str): The query to rerank documents against. documents ([Document]): The documents to rerank. @@ -162,8 +162,8 @@ def _from_openapi_data(cls: Type[T], model, query, documents, *args, **kwargs) - _visited_composed_classes = (Animal,) top_n (int): The number of results to return sorted by relevance. Defaults to the number of inputs. 
[optional] # noqa: E501 return_documents (bool): Whether to return the documents in the response. [optional] if omitted the server will use the default value of True. # noqa: E501 - rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models). [optional] if omitted the server will use the default value of ["text"]. # noqa: E501 - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) for available model parameters. [optional] # noqa: E501 + rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/search/rerank-results#reranking-models). [optional] if omitted the server will use the default value of ["text"]. # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) for available model parameters. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -220,7 +220,7 @@ def __init__(self, model, query, documents, *args, **kwargs) -> None: # noqa: E """RerankRequest - a model defined in OpenAPI Args: - model (str): The [model](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) to use for reranking. + model (str): The [model](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) to use for reranking. query (str): The query to rerank documents against. documents ([Document]): The documents to rerank. 
@@ -257,8 +257,8 @@ def __init__(self, model, query, documents, *args, **kwargs) -> None: # noqa: E _visited_composed_classes = (Animal,) top_n (int): The number of results to return sorted by relevance. Defaults to the number of inputs. [optional] # noqa: E501 return_documents (bool): Whether to return the documents in the response. [optional] if omitted the server will use the default value of True. # noqa: E501 - rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models). [optional] if omitted the server will use the default value of ["text"]. # noqa: E501 - parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/inference/understanding-inference#reranking-models) for available model parameters. [optional] # noqa: E501 + rank_fields ([str]): The field(s) to consider for reranking. If not provided, the default is `[\"text\"]`. The number of fields supported is [model-specific](https://docs.pinecone.io/guides/search/rerank-results#reranking-models). [optional] if omitted the server will use the default value of ["text"]. # noqa: E501 + parameters ({str: (bool, dict, float, int, list, str, none_type)}): Additional model-specific parameters. Refer to the [model guide](https://docs.pinecone.io/guides/search/rerank-results#reranking-models) for available model parameters. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/pinecone/db_control/db_control.py b/pinecone/db_control/db_control.py index 69aef889b..0f11c2093 100644 --- a/pinecone/db_control/db_control.py +++ b/pinecone/db_control/db_control.py @@ -14,10 +14,15 @@ if TYPE_CHECKING: from .resources.sync.index import IndexResource from .resources.sync.collection import CollectionResource + from .resources.sync.restore_job import RestoreJobResource + from .resources.sync.backup import BackupResource + from pinecone.config import Config, OpenApiConfiguration class DBControl: - def __init__(self, config, openapi_config, pool_threads): + def __init__( + self, config: "Config", openapi_config: "OpenApiConfiguration", pool_threads: int + ) -> None: self._config = config """ @private """ @@ -43,6 +48,12 @@ def __init__(self, config, openapi_config, pool_threads): self._collection_resource: Optional["CollectionResource"] = None """ @private """ + self._restore_job_resource: Optional["RestoreJobResource"] = None + """ @private """ + + self._backup_resource: Optional["BackupResource"] = None + """ @private """ + @property def index(self) -> "IndexResource": if self._index_resource is None: @@ -58,3 +69,19 @@ def collection(self) -> "CollectionResource": self._collection_resource = CollectionResource(self._index_api) return self._collection_resource + + @property + def restore_job(self) -> "RestoreJobResource": + if self._restore_job_resource is None: + from .resources.sync.restore_job import RestoreJobResource + + self._restore_job_resource = RestoreJobResource(self._index_api) + return self._restore_job_resource + + @property + def backup(self) -> "BackupResource": + if self._backup_resource is None: + from .resources.sync.backup import BackupResource + + self._backup_resource = BackupResource(self._index_api) + return self._backup_resource diff --git a/pinecone/db_control/db_control_asyncio.py b/pinecone/db_control/db_control_asyncio.py index 
91e3f1790..7ae2196ad 100644 --- a/pinecone/db_control/db_control_asyncio.py +++ b/pinecone/db_control/db_control_asyncio.py @@ -14,10 +14,13 @@ if TYPE_CHECKING: from .resources.asyncio.index import IndexResourceAsyncio from .resources.asyncio.collection import CollectionResourceAsyncio + from .resources.asyncio.restore_job import RestoreJobResourceAsyncio + from .resources.asyncio.backup import BackupResourceAsyncio + from pinecone.config import Config, OpenApiConfiguration class DBControlAsyncio: - def __init__(self, config, openapi_config): + def __init__(self, config: "Config", openapi_config: "OpenApiConfiguration") -> None: self._config = config """ @private """ @@ -39,6 +42,12 @@ def __init__(self, config, openapi_config): self._collection_resource: Optional["CollectionResourceAsyncio"] = None """ @private """ + self._restore_job_resource: Optional["RestoreJobResourceAsyncio"] = None + """ @private """ + + self._backup_resource: Optional["BackupResourceAsyncio"] = None + """ @private """ + @property def index(self) -> "IndexResourceAsyncio": if self._index_resource is None: @@ -56,3 +65,19 @@ def collection(self) -> "CollectionResourceAsyncio": self._collection_resource = CollectionResourceAsyncio(self._index_api) return self._collection_resource + + @property + def restore_job(self) -> "RestoreJobResourceAsyncio": + if self._restore_job_resource is None: + from .resources.asyncio.restore_job import RestoreJobResourceAsyncio + + self._restore_job_resource = RestoreJobResourceAsyncio(self._index_api) + return self._restore_job_resource + + @property + def backup(self) -> "BackupResourceAsyncio": + if self._backup_resource is None: + from .resources.asyncio.backup import BackupResourceAsyncio + + self._backup_resource = BackupResourceAsyncio(self._index_api) + return self._backup_resource diff --git a/pinecone/db_control/models/__init__.py b/pinecone/db_control/models/__init__.py index 34003bfe8..02bc7cc62 100644 --- a/pinecone/db_control/models/__init__.py 
+++ b/pinecone/db_control/models/__init__.py @@ -6,6 +6,11 @@ from .collection_list import CollectionList from .index_model import IndexModel from ...inference.models.index_embed import IndexEmbed +from .backup_model import BackupModel +from .backup_list import BackupList +from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel +from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList + __all__ = [ "CollectionDescription", @@ -17,4 +22,8 @@ "CollectionList", "IndexModel", "IndexEmbed", + "BackupModel", + "BackupList", + "RestoreJobModel", + "RestoreJobList", ] diff --git a/pinecone/db_control/models/backup_list.py b/pinecone/db_control/models/backup_list.py new file mode 100644 index 000000000..14f5438a4 --- /dev/null +++ b/pinecone/db_control/models/backup_list.py @@ -0,0 +1,31 @@ +import json +from pinecone.core.openapi.db_control.model.backup_list import BackupList as OpenAPIBackupList +from .backup_model import BackupModel +from typing import List + + +class BackupList: + def __init__(self, backup_list: OpenAPIBackupList): + self._backup_list = backup_list + self._backups = [BackupModel(b) for b in self._backup_list.data] + + def names(self) -> List[str]: + return [i.name for i in self._backups] + + def __getitem__(self, key): + return self._backups[key] + + def __len__(self): + return len(self._backups) + + def __iter__(self): + return iter(self._backups) + + def __str__(self): + return str(self._backups) + + def __repr__(self): + return json.dumps([i.to_dict() for i in self._backups], indent=4) + + def __getattr__(self, attr): + return getattr(self._backup_list, attr) diff --git a/pinecone/db_control/models/backup_model.py b/pinecone/db_control/models/backup_model.py new file mode 100644 index 000000000..c278692ae --- /dev/null +++ b/pinecone/db_control/models/backup_model.py @@ -0,0 +1,18 @@ +from pinecone.core.openapi.db_control.model.backup_model import BackupModel as OpenAPIBackupModel + + +class
BackupModel: + def __init__(self, backup: OpenAPIBackupModel): + self._backup = backup + + def __str__(self): + return str(self._backup) + + def __getattr__(self, attr): + return getattr(self._backup, attr) + + def __getitem__(self, key): + return self.__getattr__(key) + + def to_dict(self): + return self._backup.to_dict() diff --git a/pinecone/db_control/request_factory.py b/pinecone/db_control/request_factory.py index 719f71a15..a5d298ca7 100644 --- a/pinecone/db_control/request_factory.py +++ b/pinecone/db_control/request_factory.py @@ -29,7 +29,9 @@ ) from pinecone.core.openapi.db_control.model.pod_spec import PodSpec as PodSpecModel from pinecone.core.openapi.db_control.model.pod_spec_metadata_config import PodSpecMetadataConfig - +from pinecone.core.openapi.db_control.model.create_index_from_backup_request import ( + CreateIndexFromBackupRequest, +) from pinecone.db_control.models import ServerlessSpec, PodSpec, IndexModel, IndexEmbed from pinecone.db_control.enums import ( @@ -211,6 +213,21 @@ def create_index_for_model_request( return CreateIndexForModelRequest(**args) + @staticmethod + def create_index_from_backup_request( + name: str, + deletion_protection: Optional[Union[DeletionProtection, str]] = DeletionProtection.DISABLED, + tags: Optional[Dict[str, str]] = None, + ) -> CreateIndexFromBackupRequest: + if deletion_protection is not None: + dp = PineconeDBControlRequestFactory.__parse_deletion_protection(deletion_protection) + else: + dp = None + + tags_obj = PineconeDBControlRequestFactory.__parse_tags(tags) + + return CreateIndexFromBackupRequest(name=name, deletion_protection=dp, tags=tags_obj) + @staticmethod def configure_index_request( description: IndexModel, diff --git a/pinecone/db_control/resources/asyncio/backup.py b/pinecone/db_control/resources/asyncio/backup.py new file mode 100644 index 000000000..cb7928980 --- /dev/null +++ b/pinecone/db_control/resources/asyncio/backup.py @@ -0,0 +1,92 @@ +from typing import Optional + +from 
pinecone.core.openapi.db_control.api.manage_indexes_api import AsyncioManageIndexesApi +from pinecone.core.openapi.db_control.model.create_backup_request import CreateBackupRequest +from pinecone.db_control.models import BackupModel, BackupList +from pinecone.utils import parse_non_empty_args, require_kwargs + + +class BackupResourceAsyncio: + def __init__(self, index_api: AsyncioManageIndexesApi): + self._index_api = index_api + """ @private """ + + @require_kwargs + async def list( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> BackupList: + """ + List backups for an index or for the project. + + Args: + index_name (str): The name of the index to list backups for. + limit (int): The maximum number of backups to return. + pagination_token (str): The pagination token to use for the next page of backups. + """ + if index_name is not None: + args = parse_non_empty_args( + [ + ("index_name", index_name), + ("limit", limit), + ("pagination_token", pagination_token), + ] + ) + result = await self._index_api.list_index_backups(**args) + return BackupList(result) + else: + result = await self._index_api.list_project_backups() + return BackupList(result) + + @require_kwargs + async def create( + self, *, index_name: str, backup_name: str, description: str = "" + ) -> BackupModel: + """ + Create a backup for an index. + + Args: + index_name (str): The name of the index to create a backup for. + backup_name (str): The name of the backup to create. + description (str): The description of the backup. + + Returns: + BackupModel: The created backup. + """ + req = CreateBackupRequest(name=backup_name, description=description) + result = await self._index_api.create_backup( + index_name=index_name, create_backup_request=req + ) + return BackupModel(result) + + @require_kwargs + async def describe(self, *, backup_id: str) -> BackupModel: + """ + Describe a backup. 
+ + Args: + backup_id (str): The ID of the backup to describe. + + Returns: + BackupModel: The described backup. + """ + result = await self._index_api.describe_backup(backup_id=backup_id) + return BackupModel(result) + + @require_kwargs + async def get(self, *, backup_id: str) -> BackupModel: + """Alias for describe""" + return await self.describe(backup_id=backup_id) + + @require_kwargs + async def delete(self, *, backup_id: str) -> None: + """ + Delete a backup. + + Args: + backup_id (str): The ID of the backup to delete. + """ + return await self._index_api.delete_backup(backup_id=backup_id) diff --git a/pinecone/db_control/resources/asyncio/index.py b/pinecone/db_control/resources/asyncio/index.py index 2d93ae019..9694b3d2f 100644 --- a/pinecone/db_control/resources/asyncio/index.py +++ b/pinecone/db_control/resources/asyncio/index.py @@ -80,6 +80,22 @@ async def create_for_model( return IndexModel(resp) return await self.__poll_describe_index_until_ready(name, timeout) + async def create_from_backup( + self, + name: str, + backup_id: str, + deletion_protection: Optional[Union[DeletionProtection, str]] = DeletionProtection.DISABLED, + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> IndexModel: + req = PineconeDBControlRequestFactory.create_index_from_backup_request( + name=name, deletion_protection=deletion_protection, tags=tags + ) + await self.index_api.create_index_from_backup( + backup_id=backup_id, create_index_from_backup_request=req + ) + return await self.__poll_describe_index_until_ready(name, timeout) + async def __poll_describe_index_until_ready(self, name: str, timeout: Optional[int] = None): description = None diff --git a/pinecone/db_control/resources/asyncio/restore_job.py b/pinecone/db_control/resources/asyncio/restore_job.py new file mode 100644 index 000000000..db1b0013f --- /dev/null +++ b/pinecone/db_control/resources/asyncio/restore_job.py @@ -0,0 +1,55 @@ +from typing import Optional + +from 
pinecone.core.openapi.db_control.api.manage_indexes_api import AsyncioManageIndexesApi +from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel +from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList +from pinecone.utils import parse_non_empty_args, require_kwargs + + +class RestoreJobResourceAsyncio: + def __init__(self, index_api: AsyncioManageIndexesApi): + self._index_api = index_api + """ @private """ + + @require_kwargs + async def get(self, *, restore_job_id: str) -> RestoreJobModel: + """ + Get a restore job by ID. + + Args: + restore_job_id (str): The ID of the restore job to get. + + Returns: + RestoreJobModel: The restore job. + """ + return await self._index_api.describe_restore_job(restore_job_id=restore_job_id) + + @require_kwargs + async def describe(self, *, restore_job_id: str) -> RestoreJobModel: + """ + Get a restore job by ID. Alias for get. + + Args: + restore_job_id (str): The ID of the restore job to get. + + Returns: + RestoreJobModel: The restore job. + """ + return await self.get(restore_job_id=restore_job_id) + + @require_kwargs + async def list( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> RestoreJobList: + """ + List all restore jobs. + + Args: + limit (int): The maximum number of restore jobs to return. + pagination_token (str): The pagination token to use for the next page of restore jobs. + + Returns: + List[RestoreJobModel]: The list of restore jobs. 
+ """ + args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) + return await self._index_api.list_restore_jobs(**args) diff --git a/pinecone/db_control/resources/sync/backup.py b/pinecone/db_control/resources/sync/backup.py new file mode 100644 index 000000000..e5e7273f4 --- /dev/null +++ b/pinecone/db_control/resources/sync/backup.py @@ -0,0 +1,86 @@ +from typing import Optional + +from pinecone.core.openapi.db_control.api.manage_indexes_api import ManageIndexesApi +from pinecone.core.openapi.db_control.model.create_backup_request import CreateBackupRequest +from pinecone.db_control.models import BackupModel, BackupList +from pinecone.utils import parse_non_empty_args, require_kwargs + + +class BackupResource: + def __init__(self, index_api: ManageIndexesApi): + self._index_api = index_api + """ @private """ + + @require_kwargs + def list( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> BackupList: + """ + List backups for an index or for the project. + + Args: + index_name (str): The name of the index to list backups for. + limit (int): The maximum number of backups to return. + pagination_token (str): The pagination token to use for the next page of backups. + """ + if index_name is not None: + args = parse_non_empty_args( + [ + ("index_name", index_name), + ("limit", limit), + ("pagination_token", pagination_token), + ] + ) + return BackupList(self._index_api.list_index_backups(**args)) + else: + return BackupList(self._index_api.list_project_backups()) + + @require_kwargs + def create(self, *, index_name: str, backup_name: str, description: str = "") -> BackupModel: + """ + Create a backup for an index. + + Args: + index_name (str): The name of the index to create a backup for. + backup_name (str): The name of the backup to create. + description (str): The description of the backup. + + Returns: + BackupModel: The created backup. 
+ """ + req = CreateBackupRequest(name=backup_name, description=description) + return BackupModel( + self._index_api.create_backup(index_name=index_name, create_backup_request=req) + ) + + @require_kwargs + def describe(self, *, backup_id: str) -> BackupModel: + """ + Describe a backup. + + Args: + backup_id (str): The ID of the backup to describe. + + Returns: + BackupModel: The described backup. + """ + return BackupModel(self._index_api.describe_backup(backup_id=backup_id)) + + @require_kwargs + def get(self, *, backup_id: str) -> BackupModel: + """Alias for describe""" + return self.describe(backup_id=backup_id) + + @require_kwargs + def delete(self, *, backup_id: str) -> None: + """ + Delete a backup. + + Args: + backup_id (str): The ID of the backup to delete. + """ + return self._index_api.delete_backup(backup_id=backup_id) diff --git a/pinecone/db_control/resources/sync/index.py b/pinecone/db_control/resources/sync/index.py index 6ecf4cd2a..26dabe24e 100644 --- a/pinecone/db_control/resources/sync/index.py +++ b/pinecone/db_control/resources/sync/index.py @@ -5,7 +5,7 @@ from pinecone.db_control.index_host_store import IndexHostStore from pinecone.db_control.models import ServerlessSpec, PodSpec, IndexModel, IndexList, IndexEmbed -from pinecone.utils import docslinks +from pinecone.utils import docslinks, require_kwargs from pinecone.db_control.enums import ( Metric, @@ -86,6 +86,41 @@ def create_for_model( return IndexModel(resp) return self.__poll_describe_index_until_ready(name, timeout) + @require_kwargs + def create_from_backup( + self, + *, + name: str, + backup_id: str, + deletion_protection: Optional[Union[DeletionProtection, str]] = DeletionProtection.DISABLED, + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> IndexModel: + """ + Create an index from a backup. + + Args: + name (str): The name of the index to create. + backup_id (str): The ID of the backup to create the index from. 
+ deletion_protection (DeletionProtection): The deletion protection to use for the index. + tags (Dict[str, str]): The tags to use for the index. + timeout (int): The number of seconds to wait for the index to be ready. If -1, the function will return without polling for the index status to be ready. If None, the function will poll indefinitely for the index to be ready. + + Returns: + IndexModel: The created index. + """ + req = PineconeDBControlRequestFactory.create_index_from_backup_request( + name=name, deletion_protection=deletion_protection, tags=tags + ) + resp = self._index_api.create_index_from_backup( + backup_id=backup_id, create_index_from_backup_request=req + ) + logger.info(f"Creating index from backup. Response: {resp}") + + if timeout == -1: + return self.describe(name=name) + return self.__poll_describe_index_until_ready(name, timeout) + def __poll_describe_index_until_ready(self, name: str, timeout: Optional[int] = None): description = None diff --git a/pinecone/db_control/resources/sync/restore_job.py b/pinecone/db_control/resources/sync/restore_job.py new file mode 100644 index 000000000..79161fe55 --- /dev/null +++ b/pinecone/db_control/resources/sync/restore_job.py @@ -0,0 +1,55 @@ +from typing import Optional + +from pinecone.core.openapi.db_control.api.manage_indexes_api import ManageIndexesApi +from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel +from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList +from pinecone.utils import parse_non_empty_args, require_kwargs + + +class RestoreJobResource: + def __init__(self, index_api: ManageIndexesApi): + self._index_api = index_api + """ @private """ + + @require_kwargs + def get(self, *, restore_job_id: str) -> RestoreJobModel: + """ + Get a restore job by ID. + + Args: + restore_job_id (str): The ID of the restore job to get. + + Returns: + RestoreJobModel: The restore job. 
+ """ + return self._index_api.describe_restore_job(restore_job_id=restore_job_id) + + @require_kwargs + def describe(self, *, restore_job_id: str) -> RestoreJobModel: + """ + Get a restore job by ID. Alias for get. + + Args: + restore_job_id (str): The ID of the restore job to get. + + Returns: + RestoreJobModel: The restore job. + """ + return self.get(restore_job_id=restore_job_id) + + @require_kwargs + def list( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> RestoreJobList: + """ + List all restore jobs. + + Args: + limit (int): The maximum number of restore jobs to return. + pagination_token (str): The pagination token to use for the next page of restore jobs. + + Returns: + List[RestoreJobModel]: The list of restore jobs. + """ + args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) + return self._index_api.list_restore_jobs(**args) diff --git a/pinecone/legacy_pinecone_interface.py b/pinecone/legacy_pinecone_interface.py index 0b097261c..fc4a4696a 100644 --- a/pinecone/legacy_pinecone_interface.py +++ b/pinecone/legacy_pinecone_interface.py @@ -10,6 +10,10 @@ CollectionList, IndexModel, IndexEmbed, + BackupModel, + BackupList, + RestoreJobModel, + RestoreJobList, ) from pinecone.db_control.enums import ( Metric, @@ -297,6 +301,36 @@ def create_index( """ pass + @abstractmethod + def create_index_from_backup( + self, + *, + name: str, + backup_id: str, + deletion_protection: Optional[Union["DeletionProtection", str]] = "disabled", + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> "IndexModel": + """ + Create an index from a backup. + + Call `list_backups` to get a list of backups for your project. + + :param name: The name of the index to create. + :type name: str + :param backup_id: The ID of the backup to restore. + :type backup_id: str + :param deletion_protection: If enabled, the index cannot be deleted. If disabled, the index can be deleted. 
This setting can be changed with `configure_index`. + :type deletion_protection: Optional[Literal["enabled", "disabled"]] + :param tags: Tags are key-value pairs you can attach to indexes to better understand, organize, and identify your resources. Some example use cases include tagging indexes with the name of the model that generated the embeddings, the date the index was created, or the purpose of the index. + :type tags: Optional[Dict[str, str]] + :param timeout: Specify the number of seconds to wait until index is ready to receive data. If None, wait indefinitely; if >=0, time out after this many seconds; + if -1, return immediately and do not wait. + :return: A description of the index that was created. + :rtype: IndexModel + """ + pass + @abstractmethod def create_index_for_model( self, @@ -701,6 +735,77 @@ def describe_collection(self, name: str): """ pass + @abstractmethod + def create_backup( + self, *, index_name: str, backup_name: str, description: str = "" + ) -> "BackupModel": + """Create a backup of an index. + + Args: + index_name (str): The name of the index to backup. + backup_name (str): The name to give the backup. + description (str): Optional description of the backup. + """ + pass + + @abstractmethod + def list_backups( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> "BackupList": + """List backups. + + If index_name is provided, the backups will be filtered by index. If no index_name is provided, all backups in the project will be returned. + + Args: + index_name (str): The name of the index to list backups for. + limit (int): The maximum number of backups to return. + pagination_token (str): The pagination token to use for pagination. + """ + pass + + @abstractmethod + def describe_backup(self, *, backup_id: str) -> "BackupModel": + """Describe a backup. + + Args: + backup_id (str): The ID of the backup to describe. 
+ """ + pass + + @abstractmethod + def delete_backup(self, *, backup_id: str) -> None: + """Delete a backup. + + Args: + backup_id (str): The ID of the backup to delete. + """ + pass + + @abstractmethod + def list_restore_jobs( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> "RestoreJobList": + """List restore jobs. + + Args: + limit (int): The maximum number of restore jobs to return. + pagination_token (str): The pagination token to use for pagination. + """ + pass + + @abstractmethod + def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + """Describe a restore job. + + Args: + restore_job_id (str): The ID of the restore job to describe. + """ + pass + @abstractmethod def Index(self, name: str = "", host: str = "", **kwargs): """ diff --git a/pinecone/openapi_support/api_version.py b/pinecone/openapi_support/api_version.py index 4879ae90a..8725b9f27 100644 --- a/pinecone/openapi_support/api_version.py +++ b/pinecone/openapi_support/api_version.py @@ -2,4 +2,4 @@ # Do not edit this file manually. 
API_VERSION = "2025-04" -APIS_REPO_SHA = "483b3885439a51ef831b820bfa621e2c9515834f" +APIS_REPO_SHA = "ba143abc7449abfcf0b6635f1aabff2400dac762" diff --git a/pinecone/pinecone.py b/pinecone/pinecone.py index e5bc112ae..bddf1dd5c 100644 --- a/pinecone/pinecone.py +++ b/pinecone/pinecone.py @@ -7,7 +7,7 @@ from .legacy_pinecone_interface import LegacyPineconeDBControlInterface -from pinecone.utils import normalize_host, PluginAware, docslinks +from pinecone.utils import normalize_host, PluginAware, docslinks, require_kwargs from .langchain_import_warnings import _build_langchain_attribute_error_message logger = logging.getLogger(__name__) @@ -41,6 +41,10 @@ IndexList, CollectionList, IndexEmbed, + BackupModel, + BackupList, + RestoreJobModel, + RestoreJobList, ) @@ -212,6 +216,24 @@ def create_index_for_model( timeout=timeout, ) + @require_kwargs + def create_index_from_backup( + self, + *, + index_name: str, + backup_id: str, + deletion_protection: Optional[Union["DeletionProtection", str]] = "disabled", + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> "IndexModel": + return self.db.index.create_from_backup( + index_name=index_name, + backup_id=backup_id, + deletion_protection=deletion_protection, + tags=tags, + timeout=timeout, + ) + def delete_index(self, name: str, timeout: Optional[int] = None): return self.db.index.delete(name=name, timeout=timeout) @@ -252,6 +274,44 @@ def delete_collection(self, name: str) -> None: def describe_collection(self, name: str): return self.db.collection.describe(name=name) + @require_kwargs + def create_backup( + self, *, index_name: str, backup_name: str, description: str = "" + ) -> "BackupModel": + return self.db.backup.create( + index_name=index_name, backup_name=backup_name, description=description + ) + + @require_kwargs + def list_backups( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> "BackupList": + return 
self.db.backup.list( + index_name=index_name, limit=limit, pagination_token=pagination_token + ) + + @require_kwargs + def describe_backup(self, *, backup_id: str) -> "BackupModel": + return self.db.backup.describe(backup_id=backup_id) + + @require_kwargs + def delete_backup(self, *, backup_id: str) -> None: + return self.db.backup.delete(backup_id=backup_id) + + @require_kwargs + def list_restore_jobs( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> "RestoreJobList": + return self.db.restore_job.list(limit=limit, pagination_token=pagination_token) + + @require_kwargs + def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + return self.db.restore_job.describe(restore_job_id=restore_job_id) + @staticmethod def from_texts(*args, **kwargs): """@private""" diff --git a/pinecone/pinecone_asyncio.py b/pinecone/pinecone_asyncio.py index 278039e64..2ba6be872 100644 --- a/pinecone/pinecone_asyncio.py +++ b/pinecone/pinecone_asyncio.py @@ -4,7 +4,7 @@ from pinecone.config import PineconeConfig, ConfigBuilder -from pinecone.utils import normalize_host +from pinecone.utils import normalize_host, require_kwargs from pinecone.utils import docslinks from .pinecone_interface_asyncio import PineconeAsyncioDBControlInterface @@ -30,8 +30,12 @@ IndexList, CollectionList, IndexEmbed, + BackupModel, + BackupList, + RestoreJobModel, + RestoreJobList, ) - from pinecone.core.openapi.db_control.api.manage_indexes_api import ManageIndexesApi + from pinecone.core.openapi.db_control.api.manage_indexes_api import AsyncioManageIndexesApi from pinecone.db_control.index_host_store import IndexHostStore logger = logging.getLogger(__name__) @@ -179,7 +183,7 @@ def index_host_store(self) -> "IndexHostStore": return self.db.index._index_host_store @property - def index_api(self) -> "ManageIndexesApi": + def index_api(self) -> "AsyncioManageIndexesApi": """@private""" warnings.warn( "The `index_api` property is deprecated. 
This warning will become an error in a future version of the Pinecone Python SDK.", @@ -231,6 +235,24 @@ async def create_index_for_model( timeout=timeout, ) + @require_kwargs + async def create_index_from_backup( + self, + *, + name: str, + backup_id: str, + deletion_protection: Optional[Union["DeletionProtection", str]] = "disabled", + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> "IndexModel": + return await self.db.index.create_from_backup( + name=name, + backup_id=backup_id, + deletion_protection=deletion_protection, + tags=tags, + timeout=timeout, + ) + async def delete_index(self, name: str, timeout: Optional[int] = None): return await self.db.index.delete(name=name, timeout=timeout) @@ -271,6 +293,44 @@ async def delete_collection(self, name: str): async def describe_collection(self, name: str): return await self.db.collection.describe(name=name) + @require_kwargs + async def create_backup( + self, *, index_name: str, backup_name: str, description: str = "" + ) -> "BackupModel": + return await self.db.backup.create( + index_name=index_name, backup_name=backup_name, description=description + ) + + @require_kwargs + async def list_backups( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> "BackupList": + return await self.db.backup.list( + index_name=index_name, limit=limit, pagination_token=pagination_token + ) + + @require_kwargs + async def describe_backup(self, *, backup_id: str) -> "BackupModel": + return await self.db.backup.describe(backup_id=backup_id) + + @require_kwargs + async def delete_backup(self, *, backup_id: str) -> None: + return await self.db.backup.delete(backup_id=backup_id) + + @require_kwargs + async def list_restore_jobs( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> "RestoreJobList": + return await self.db.restore_job.list(limit=limit, pagination_token=pagination_token) + + @require_kwargs 
+ async def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + return await self.db.restore_job.describe(restore_job_id=restore_job_id) + def IndexAsyncio(self, host: str, **kwargs) -> "_IndexAsyncio": from pinecone.db_data import _IndexAsyncio diff --git a/pinecone/pinecone_interface_asyncio.py b/pinecone/pinecone_interface_asyncio.py index 31d1febad..b17615298 100644 --- a/pinecone/pinecone_interface_asyncio.py +++ b/pinecone/pinecone_interface_asyncio.py @@ -14,6 +14,10 @@ CollectionList, IndexModel, IndexEmbed, + BackupModel, + BackupList, + RestoreJobModel, + RestoreJobList, ) from pinecone.db_control.enums import ( Metric, @@ -482,6 +486,36 @@ async def main(): """ pass + @abstractmethod + def create_index_from_backup( + self, + *, + name: str, + backup_id: str, + deletion_protection: Optional[Union["DeletionProtection", str]] = "disabled", + tags: Optional[Dict[str, str]] = None, + timeout: Optional[int] = None, + ) -> "IndexModel": + """ + Create an index from a backup. + + Call `list_backups` to get a list of backups for your project. + + :param name: The name of the index to create. + :type name: str + :param backup_id: The ID of the backup to restore. + :type backup_id: str + :param deletion_protection: If enabled, the index cannot be deleted. If disabled, the index can be deleted. This setting can be changed with `configure_index`. + :type deletion_protection: Optional[Literal["enabled", "disabled"]] + :param tags: Tags are key-value pairs you can attach to indexes to better understand, organize, and identify your resources. Some example use cases include tagging indexes with the name of the model that generated the embeddings, the date the index was created, or the purpose of the index. + :type tags: Optional[Dict[str, str]] + :param timeout: Specify the number of seconds to wait until index is ready to receive data. 
If None, wait indefinitely; if >=0, time out after this many seconds; + if -1, return immediately and do not wait. + :return: A description of the index that was created. + :rtype: IndexModel + """ + pass + @abstractmethod async def delete_index(self, name: str, timeout: Optional[int] = None): """ @@ -772,6 +806,77 @@ async def main(): """ pass + @abstractmethod + async def create_backup( + self, *, index_name: str, backup_name: str, description: str = "" + ) -> "BackupModel": + """Create a backup of an index. + + Args: + index_name (str): The name of the index to backup. + backup_name (str): The name to give the backup. + description (str): Optional description of the backup. + """ + pass + + @abstractmethod + async def list_backups( + self, + *, + index_name: Optional[str] = None, + limit: Optional[int] = 10, + pagination_token: Optional[str] = None, + ) -> "BackupList": + """List backups. + + If index_name is provided, the backups will be filtered by index. If no index_name is provided, all backups in the project will be returned. + + Args: + index_name (str): The name of the index to list backups for. + limit (int): The maximum number of backups to return. + pagination_token (str): The pagination token to use for pagination. + """ + pass + + @abstractmethod + async def describe_backup(self, *, backup_id: str) -> "BackupModel": + """Describe a backup. + + Args: + backup_id (str): The ID of the backup to describe. + """ + pass + + @abstractmethod + async def delete_backup(self, *, backup_id: str) -> None: + """Delete a backup. + + Args: + backup_id (str): The ID of the backup to delete. + """ + pass + + @abstractmethod + async def list_restore_jobs( + self, *, limit: Optional[int] = 10, pagination_token: Optional[str] = None + ) -> "RestoreJobList": + """List restore jobs. + + Args: + limit (int): The maximum number of restore jobs to return. + pagination_token (str): The pagination token to use for pagination. 
+ """ + pass + + @abstractmethod + async def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + """Describe a restore job. + + Args: + restore_job_id (str): The ID of the restore job to describe. + """ + pass + @abstractmethod async def create_collection(self, name: str, source: str): """Create a collection from a pod-based index diff --git a/pinecone/utils/__init__.py b/pinecone/utils/__init__.py index 3d72b5d64..33d286d81 100644 --- a/pinecone/utils/__init__.py +++ b/pinecone/utils/__init__.py @@ -17,6 +17,7 @@ from .error_handling import validate_and_convert_errors from .plugin_aware import PluginAware from .filter_dict import filter_dict +from .require_kwargs import require_kwargs __all__ = [ "PluginAware", @@ -36,4 +37,5 @@ "validate_and_convert_errors", "convert_enum_to_string", "filter_dict", + "require_kwargs", ] diff --git a/pinecone/utils/require_kwargs.py b/pinecone/utils/require_kwargs.py new file mode 100644 index 000000000..9321f4689 --- /dev/null +++ b/pinecone/utils/require_kwargs.py @@ -0,0 +1,16 @@ +import functools +import inspect + + +def require_kwargs(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if len(args) > 1: # First arg is self + param_names = list(inspect.signature(func).parameters.keys())[1:] # Skip self + raise TypeError( + f"{func.__name__}() requires keyword arguments. 
" + f"Please use {func.__name__}({', '.join(f'{name}=value' for name in param_names)})" + ) + return func(*args, **kwargs) + + return wrapper From 0485df9e5cec0102f07b62484305f89d8c9f5c98 Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Tue, 13 May 2025 12:17:06 -0400 Subject: [PATCH 2/7] Fix param name --- pinecone/pinecone.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pinecone/pinecone.py b/pinecone/pinecone.py index bddf1dd5c..e9632c036 100644 --- a/pinecone/pinecone.py +++ b/pinecone/pinecone.py @@ -220,14 +220,14 @@ def create_index_for_model( def create_index_from_backup( self, *, - index_name: str, + name: str, backup_id: str, deletion_protection: Optional[Union["DeletionProtection", str]] = "disabled", tags: Optional[Dict[str, str]] = None, timeout: Optional[int] = None, ) -> "IndexModel": return self.db.index.create_from_backup( - index_name=index_name, + name=name, backup_id=backup_id, deletion_protection=deletion_protection, tags=tags, From 3fff801ec1a78c2fbd5d13145c36f36da791cc9d Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Wed, 14 May 2025 00:54:48 -0400 Subject: [PATCH 3/7] Add integration tests --- .github/workflows/testing-integration.yaml | 2 + pinecone/db_control/models/__init__.py | 4 +- pinecone/db_control/models/backup_list.py | 26 ++- pinecone/db_control/models/backup_model.py | 5 + pinecone/db_control/models/index_model.py | 5 + .../db_control/models/restore_job_list.py | 50 ++++ .../db_control/models/restore_job_model.py | 25 ++ pinecone/db_control/repr_overrides.py | 5 +- .../resources/asyncio/restore_job.py | 19 +- .../db_control/resources/sync/restore_job.py | 19 +- pinecone/legacy_pinecone_interface.py | 4 +- pinecone/pinecone.py | 4 +- pinecone/pinecone_asyncio.py | 4 +- pinecone/pinecone_interface_asyncio.py | 4 +- pinecone/scripts/repl.py | 52 +++++ tests/integration/control/backup/__init__.py | 0 tests/integration/control/backup/conftest.py | 168 +++++++++++++ .../integration/control/backup/test_backup.py | 
195 ++++++++++++++++ .../control/restore_job/__init__.py | 0 .../control/restore_job/conftest.py | 168 +++++++++++++ .../control/restore_job/test_describe.py | 38 +++ .../control/restore_job/test_list.py | 58 +++++ .../control_asyncio/backup/__init__.py | 0 .../control_asyncio/backup/conftest.py | 220 ++++++++++++++++++ .../control_asyncio/backup/test_backup.py | 205 ++++++++++++++++ .../control_asyncio/restore_job/__init__.py | 0 .../control_asyncio/restore_job/conftest.py | 220 ++++++++++++++++++ .../restore_job/test_describe.py | 42 ++++ .../control_asyncio/restore_job/test_list.py | 67 ++++++ tests/integration/helpers/helpers.py | 3 + 30 files changed, 1577 insertions(+), 35 deletions(-) create mode 100644 pinecone/db_control/models/restore_job_list.py create mode 100644 pinecone/db_control/models/restore_job_model.py create mode 100644 pinecone/scripts/repl.py create mode 100644 tests/integration/control/backup/__init__.py create mode 100644 tests/integration/control/backup/conftest.py create mode 100644 tests/integration/control/backup/test_backup.py create mode 100644 tests/integration/control/restore_job/__init__.py create mode 100644 tests/integration/control/restore_job/conftest.py create mode 100644 tests/integration/control/restore_job/test_describe.py create mode 100644 tests/integration/control/restore_job/test_list.py create mode 100644 tests/integration/control_asyncio/backup/__init__.py create mode 100644 tests/integration/control_asyncio/backup/conftest.py create mode 100644 tests/integration/control_asyncio/backup/test_backup.py create mode 100644 tests/integration/control_asyncio/restore_job/__init__.py create mode 100644 tests/integration/control_asyncio/restore_job/conftest.py create mode 100644 tests/integration/control_asyncio/restore_job/test_describe.py create mode 100644 tests/integration/control_asyncio/restore_job/test_list.py diff --git a/.github/workflows/testing-integration.yaml b/.github/workflows/testing-integration.yaml index 
53023bdeb..6756c7796 100644 --- a/.github/workflows/testing-integration.yaml +++ b/.github/workflows/testing-integration.yaml @@ -27,6 +27,8 @@ jobs: run: poetry run pytest tests/integration/control/index --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG - name: 'Run collection tests' run: poetry run pytest tests/integration/control/collections --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG + - name: 'Run backup & restore tests' + run: poetry run pytest tests/integration/control/backup --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG inference: diff --git a/pinecone/db_control/models/__init__.py b/pinecone/db_control/models/__init__.py index 02bc7cc62..66568de33 100644 --- a/pinecone/db_control/models/__init__.py +++ b/pinecone/db_control/models/__init__.py @@ -8,8 +8,8 @@ from ...inference.models.index_embed import IndexEmbed from .backup_model import BackupModel from .backup_list import BackupList -from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel -from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList +from .restore_job_model import RestoreJobModel +from .restore_job_list import RestoreJobList __all__ = [ diff --git a/pinecone/db_control/models/backup_list.py b/pinecone/db_control/models/backup_list.py index 14f5438a4..fe21c077a 100644 --- a/pinecone/db_control/models/backup_list.py +++ b/pinecone/db_control/models/backup_list.py @@ -13,7 +13,20 @@ def names(self) -> List[str]: return [i.name for i in self._backups] def __getitem__(self, key): - return self.indexes[key] + if isinstance(key, int): + return self._backups[key] + elif key == "data": + return self._backups + else: + # pagination and any other keys added in the future + return self._backup_list[key] + + def __getattr__(self, attr): + if attr == "data": + return self._backups + else: + # pagination and any other keys added in the future + return getattr(self._backup_list, attr) def __len__(self): return 
len(self._backups) @@ -25,7 +38,12 @@ def __str__(self): return str(self._backups) def __repr__(self): - return json.dumps([i.to_dict() for i in self._backups], indent=4) + raw_dict = self._backup_list.to_dict() + raw_dict["data"] = [i.to_dict() for i in self._backups] - def __getattr__(self, attr): - return getattr(self._backup_list, attr) + # Remove keys with value None + for key, value in list(raw_dict.items()): + if value is None: + del raw_dict[key] + + return json.dumps(raw_dict, indent=4) diff --git a/pinecone/db_control/models/backup_model.py b/pinecone/db_control/models/backup_model.py index c278692ae..0d49d33eb 100644 --- a/pinecone/db_control/models/backup_model.py +++ b/pinecone/db_control/models/backup_model.py @@ -1,4 +1,6 @@ +import json from pinecone.core.openapi.db_control.model.backup_model import BackupModel as OpenAPIBackupModel +from pinecone.utils.repr_overrides import custom_serializer class BackupModel: @@ -14,5 +16,8 @@ def __getattr__(self, attr): def __getitem__(self, key): return self.__getattr__(key) + def __repr__(self): + return json.dumps(self.to_dict(), indent=4, default=custom_serializer) + def to_dict(self): return self._backup.to_dict() diff --git a/pinecone/db_control/models/index_model.py b/pinecone/db_control/models/index_model.py index 75ba1f30f..a268df573 100644 --- a/pinecone/db_control/models/index_model.py +++ b/pinecone/db_control/models/index_model.py @@ -1,4 +1,6 @@ from pinecone.core.openapi.db_control.model.index_model import IndexModel as OpenAPIIndexModel +import json +from pinecone.utils.repr_overrides import custom_serializer class IndexModel: @@ -15,5 +17,8 @@ def __getattr__(self, attr): def __getitem__(self, key): return self.__getattr__(key) + def __repr__(self): + return json.dumps(self.to_dict(), indent=4, default=custom_serializer) + def to_dict(self): return self.index.to_dict() diff --git a/pinecone/db_control/models/restore_job_list.py b/pinecone/db_control/models/restore_job_list.py new file mode 
100644 index 000000000..7c80aa96e --- /dev/null +++ b/pinecone/db_control/models/restore_job_list.py @@ -0,0 +1,50 @@ +import json +from pinecone.core.openapi.db_control.model.restore_job_list import ( + RestoreJobList as OpenAPIRestoreJobList, +) +from .restore_job_model import RestoreJobModel + +from datetime import datetime + + +def custom_serializer(obj): + if isinstance(obj, datetime): + return obj.isoformat() + else: + return str(obj) + + +class RestoreJobList: + def __init__(self, restore_job_list: OpenAPIRestoreJobList): + self._restore_job_list = restore_job_list + self._restore_jobs = [RestoreJobModel(r) for r in self._restore_job_list.data] + + def __getitem__(self, key): + if isinstance(key, int): + return self._restore_jobs[key] + elif key == "data": + return self._restore_jobs + else: + # pagination and any other keys added in the future + return self._restore_job_list[key] + + def __getattr__(self, attr): + if attr == "data": + return self._restore_jobs + else: + # pagination and any other keys added in the future + return getattr(self._restore_job_list, attr) + + def __len__(self): + return len(self._restore_jobs) + + def __iter__(self): + return iter(self._restore_jobs) + + def __str__(self): + return str(self._restore_jobs) + + def __repr__(self): + return json.dumps( + [i.to_dict() for i in self._restore_jobs], indent=4, default=custom_serializer + ) diff --git a/pinecone/db_control/models/restore_job_model.py b/pinecone/db_control/models/restore_job_model.py new file mode 100644 index 000000000..1dc6902dc --- /dev/null +++ b/pinecone/db_control/models/restore_job_model.py @@ -0,0 +1,25 @@ +import json +from pinecone.core.openapi.db_control.model.restore_job_model import ( + RestoreJobModel as OpenAPIRestoreJobModel, +) +from pinecone.utils.repr_overrides import custom_serializer + + +class RestoreJobModel: + def __init__(self, restore_job: OpenAPIRestoreJobModel): + self.restore_job = restore_job + + def __str__(self): + return 
str(self.restore_job) + + def __getattr__(self, attr): + return getattr(self.restore_job, attr) + + def __getitem__(self, key): + return self.__getattr__(key) + + def __repr__(self): + return json.dumps(self.to_dict(), indent=4, default=custom_serializer) + + def to_dict(self): + return self.restore_job.to_dict() diff --git a/pinecone/db_control/repr_overrides.py b/pinecone/db_control/repr_overrides.py index 714b8dfb4..ce6e9611e 100644 --- a/pinecone/db_control/repr_overrides.py +++ b/pinecone/db_control/repr_overrides.py @@ -1,5 +1,4 @@ -from pinecone.utils import install_json_repr_override -from pinecone.db_control.models.index_model import IndexModel +from pinecone.utils.repr_overrides import install_json_repr_override from pinecone.core.openapi.db_control.model.collection_model import CollectionModel @@ -12,5 +11,5 @@ def install_repr_overrides(): from pprint.pformat seems better for data plane objects such as lists of query results. """ - for model in [IndexModel, CollectionModel]: + for model in [CollectionModel]: install_json_repr_override(model) diff --git a/pinecone/db_control/resources/asyncio/restore_job.py b/pinecone/db_control/resources/asyncio/restore_job.py index db1b0013f..397a5050e 100644 --- a/pinecone/db_control/resources/asyncio/restore_job.py +++ b/pinecone/db_control/resources/asyncio/restore_job.py @@ -1,8 +1,7 @@ from typing import Optional from pinecone.core.openapi.db_control.api.manage_indexes_api import AsyncioManageIndexesApi -from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel -from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList +from pinecone.db_control.models import RestoreJobModel, RestoreJobList from pinecone.utils import parse_non_empty_args, require_kwargs @@ -12,30 +11,31 @@ def __init__(self, index_api: AsyncioManageIndexesApi): """ @private """ @require_kwargs - async def get(self, *, restore_job_id: str) -> RestoreJobModel: + async def get(self, *, job_id: str) 
-> RestoreJobModel: """ Get a restore job by ID. Args: - restore_job_id (str): The ID of the restore job to get. + job_id (str): The ID of the restore job to get. Returns: RestoreJobModel: The restore job. """ - return await self._index_api.describe_restore_job(restore_job_id=restore_job_id) + job = await self._index_api.describe_restore_job(job_id=job_id) + return RestoreJobModel(job) @require_kwargs - async def describe(self, *, restore_job_id: str) -> RestoreJobModel: + async def describe(self, *, job_id: str) -> RestoreJobModel: """ Get a restore job by ID. Alias for get. Args: - restore_job_id (str): The ID of the restore job to get. + job_id (str): The ID of the restore job to get. Returns: RestoreJobModel: The restore job. """ - return await self.get(restore_job_id=restore_job_id) + return await self.get(job_id=job_id) @require_kwargs async def list( @@ -52,4 +52,5 @@ async def list( List[RestoreJobModel]: The list of restore jobs. """ args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) - return await self._index_api.list_restore_jobs(**args) + jobs = await self._index_api.list_restore_jobs(**args) + return RestoreJobList(jobs) diff --git a/pinecone/db_control/resources/sync/restore_job.py b/pinecone/db_control/resources/sync/restore_job.py index 79161fe55..b314bc53d 100644 --- a/pinecone/db_control/resources/sync/restore_job.py +++ b/pinecone/db_control/resources/sync/restore_job.py @@ -1,8 +1,7 @@ from typing import Optional from pinecone.core.openapi.db_control.api.manage_indexes_api import ManageIndexesApi -from pinecone.core.openapi.db_control.model.restore_job_model import RestoreJobModel -from pinecone.core.openapi.db_control.model.restore_job_list import RestoreJobList +from pinecone.db_control.models import RestoreJobModel, RestoreJobList from pinecone.utils import parse_non_empty_args, require_kwargs @@ -12,30 +11,31 @@ def __init__(self, index_api: ManageIndexesApi): """ @private """ @require_kwargs - def 
get(self, *, restore_job_id: str) -> RestoreJobModel: + def get(self, *, job_id: str) -> RestoreJobModel: """ Get a restore job by ID. Args: - restore_job_id (str): The ID of the restore job to get. + job_id (str): The ID of the restore job to get. Returns: RestoreJobModel: The restore job. """ - return self._index_api.describe_restore_job(restore_job_id=restore_job_id) + job = self._index_api.describe_restore_job(job_id=job_id) + return RestoreJobModel(job) @require_kwargs - def describe(self, *, restore_job_id: str) -> RestoreJobModel: + def describe(self, *, job_id: str) -> RestoreJobModel: """ Get a restore job by ID. Alias for get. Args: - restore_job_id (str): The ID of the restore job to get. + job_id (str): The ID of the restore job to get. Returns: RestoreJobModel: The restore job. """ - return self.get(restore_job_id=restore_job_id) + return self.get(job_id=job_id) @require_kwargs def list( @@ -52,4 +52,5 @@ def list( List[RestoreJobModel]: The list of restore jobs. """ args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) - return self._index_api.list_restore_jobs(**args) + jobs = self._index_api.list_restore_jobs(**args) + return RestoreJobList(jobs) diff --git a/pinecone/legacy_pinecone_interface.py b/pinecone/legacy_pinecone_interface.py index fc4a4696a..cb896022e 100644 --- a/pinecone/legacy_pinecone_interface.py +++ b/pinecone/legacy_pinecone_interface.py @@ -798,11 +798,11 @@ def list_restore_jobs( pass @abstractmethod - def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + def describe_restore_job(self, *, job_id: str) -> "RestoreJobModel": """Describe a restore job. Args: - restore_job_id (str): The ID of the restore job to describe. + job_id (str): The ID of the restore job to describe. 
""" pass diff --git a/pinecone/pinecone.py b/pinecone/pinecone.py index e9632c036..ae854129d 100644 --- a/pinecone/pinecone.py +++ b/pinecone/pinecone.py @@ -309,8 +309,8 @@ def list_restore_jobs( return self.db.restore_job.list(limit=limit, pagination_token=pagination_token) @require_kwargs - def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": - return self.db.restore_job.describe(restore_job_id=restore_job_id) + def describe_restore_job(self, *, job_id: str) -> "RestoreJobModel": + return self.db.restore_job.describe(job_id=job_id) @staticmethod def from_texts(*args, **kwargs): diff --git a/pinecone/pinecone_asyncio.py b/pinecone/pinecone_asyncio.py index 2ba6be872..124ac854c 100644 --- a/pinecone/pinecone_asyncio.py +++ b/pinecone/pinecone_asyncio.py @@ -328,8 +328,8 @@ async def list_restore_jobs( return await self.db.restore_job.list(limit=limit, pagination_token=pagination_token) @require_kwargs - async def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": - return await self.db.restore_job.describe(restore_job_id=restore_job_id) + async def describe_restore_job(self, *, job_id: str) -> "RestoreJobModel": + return await self.db.restore_job.describe(job_id=job_id) def IndexAsyncio(self, host: str, **kwargs) -> "_IndexAsyncio": from pinecone.db_data import _IndexAsyncio diff --git a/pinecone/pinecone_interface_asyncio.py b/pinecone/pinecone_interface_asyncio.py index b17615298..6dfd953c9 100644 --- a/pinecone/pinecone_interface_asyncio.py +++ b/pinecone/pinecone_interface_asyncio.py @@ -869,11 +869,11 @@ async def list_restore_jobs( pass @abstractmethod - async def describe_restore_job(self, *, restore_job_id: str) -> "RestoreJobModel": + async def describe_restore_job(self, *, job_id: str) -> "RestoreJobModel": """Describe a restore job. Args: - restore_job_id (str): The ID of the restore job to describe. + job_id (str): The ID of the restore job to describe. 
""" pass diff --git a/pinecone/scripts/repl.py b/pinecone/scripts/repl.py new file mode 100644 index 000000000..55f80c5cd --- /dev/null +++ b/pinecone/scripts/repl.py @@ -0,0 +1,52 @@ +import code +import logging + + +def setup_logging(): + # Create a custom formatter + formatter = logging.Formatter( + fmt="%(asctime)s | %(levelname)-8s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S" + ) + + # Create and configure the console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(formatter) + + # Configure the root logger + root_logger = logging.getLogger() + root_logger.setLevel(logging.INFO) + root_logger.addHandler(console_handler) + + return root_logger + + +def main(): + # Set up logging + logger = setup_logging() + logger.info("Initializing environment...") + + # You can add any setup code here, such as: + # - Setting environment variables + # - Importing commonly used modules + # - Loading configuration files + + # Start the interactive REPL + banner = """ + Welcome to the custom Python REPL! + Your initialization steps have been completed. 
+ """ + + # Create a custom namespace with any pre-loaded variables + namespace = { + "__name__": "__main__", + "__doc__": None, + "logger": logger, # Make logger available in REPL + # Add any other variables you want to have available in the REPL + } + + # Start the interactive console + code.interact(banner=banner, local=namespace) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/control/backup/__init__.py b/tests/integration/control/backup/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/control/backup/conftest.py b/tests/integration/control/backup/conftest.py new file mode 100644 index 000000000..9798da273 --- /dev/null +++ b/tests/integration/control/backup/conftest.py @@ -0,0 +1,168 @@ +import pytest +import uuid +import time +import logging +import dotenv +from pinecone import Pinecone, NotFoundException, PineconeApiException +from ...helpers import generate_index_name, get_environment_var, index_tags as index_tags_helper + +dotenv.load_dotenv() + +logger = logging.getLogger(__name__) +""" @private """ + +# Generate a unique ID for the entire test run +RUN_ID = str(uuid.uuid4()) + + +@pytest.fixture() +def index_tags(request): + return index_tags_helper(request, RUN_ID) + + +@pytest.fixture() +def pc(): + api_key = get_environment_var("PINECONE_API_KEY") + return Pinecone( + api_key=api_key, additional_headers={"sdk-test-suite": "pinecone-python-client"} + ) + + +@pytest.fixture() +def serverless_cloud(): + return get_environment_var("SERVERLESS_CLOUD", "aws") + + +@pytest.fixture() +def serverless_region(): + return get_environment_var("SERVERLESS_REGION", "us-west-2") + + +@pytest.fixture() +def create_sl_index_params(index_name, serverless_cloud, serverless_region, index_tags): + spec = {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + return dict(name=index_name, dimension=10, metric="cosine", spec=spec, tags=index_tags) + + +@pytest.fixture() +def 
index_name(request): + test_name = request.node.name + return generate_index_name(test_name) + + +@pytest.fixture() +def ready_sl_index(pc, index_name, create_sl_index_params): + create_sl_index_params["timeout"] = None + pc.create_index(**create_sl_index_params) + yield index_name + pc.db.index.delete(name=index_name, timeout=-1) + + +def delete_with_retry(pc, index_name, retries=0, sleep_interval=5): + logger.debug( + "Deleting index " + + index_name + + ", retry " + + str(retries) + + ", next sleep interval " + + str(sleep_interval) + ) + try: + pc.db.index.delete(name=index_name, timeout=-1) + except NotFoundException: + pass + except PineconeApiException as e: + if e.error.code == "PRECONDITON_FAILED": + if retries > 5: + raise Exception("Unable to delete index " + index_name) + time.sleep(sleep_interval) + delete_with_retry(pc, index_name, retries + 1, sleep_interval * 2) + else: + logger.error(e.__class__) + logger.error(e) + raise Exception("Unable to delete index " + index_name) + except Exception as e: + logger.error(e.__class__) + logger.error(e) + raise Exception("Unable to delete index " + index_name) + + +@pytest.fixture(autouse=True) +def cleanup(pc, index_name): + yield + + try: + desc = pc.db.index.describe(name=index_name) + if desc.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {index_name}") + pc.db.index.configure(name=index_name, deletion_protection="disabled") + logger.debug("Attempting to delete index with name: " + index_name) + pc.db.index.delete(name=index_name, timeout=-1) + except Exception: + pass + + for backup in pc.db.backup.list(): + logger.debug(f"Deleting backup: {backup.name}") + try: + pc.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") + + +def pytest_sessionfinish(session, exitstatus): + """ + Hook that runs after all tests have completed. 
+ This is a good place to clean up any resources that were created during the test session. + """ + logger.info("Running final cleanup after all tests...") + + try: + pc = Pinecone() + indexes = pc.db.index.list() + test_indexes = [ + idx for idx in indexes if idx.tags is not None and idx.tags.get("test-run") == RUN_ID + ] + + logger.info(f"Indexes to delete: {[idx.name for idx in test_indexes]}") + + for idx in test_indexes: + if idx.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {idx.name}") + pc.db.index.configure(name=idx.name, deletion_protection="disabled") + # Wait for index to be updated with status ready + logger.info(f"Waiting for index {idx.name} to be ready...") + timeout = 60 + while True and timeout > 0: + is_ready = pc.db.index.describe(name=idx.name).ready + if is_ready: + break + time.sleep(1) + timeout -= 1 + if timeout <= 0: + logger.warning(f"Index {idx.name} did not become ready in time") + else: + logger.info(f"Deletion protection is already disabled for index: {idx.name}") + + for idx in test_indexes: + try: + logger.info(f"Deleting index: {idx.name}") + pc.db.index.delete(name=idx.name, timeout=-1) + except Exception as e: + logger.warning(f"Failed to delete index {idx.name}: {str(e)}") + + backups = pc.db.backup.list() + if len(backups) > 0: + logger.info(f"Deleting {len(backups)} backups") + for backup in backups: + logger.debug(f"Deleting backup: {backup.name}") + try: + pc.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") + else: + logger.info("No backups to delete") + + except Exception as e: + logger.error(f"Error during final cleanup: {str(e)}") + + logger.info("Final cleanup completed") diff --git a/tests/integration/control/backup/test_backup.py b/tests/integration/control/backup/test_backup.py new file mode 100644 index 000000000..2226928f8 --- /dev/null +++ 
b/tests/integration/control/backup/test_backup.py @@ -0,0 +1,195 @@ +import pytest +import random +from ...helpers import random_string, poll_stats_for_namespace +import logging +import time + +logger = logging.getLogger(__name__) + + +class TestBackups: + def test_create_backup(self, pc, ready_sl_index, index_tags): + desc = pc.db.index.describe(name=ready_sl_index) + dimension = desc.dimension + + # Upsert some sample data + ns = random_string(10) + idx = pc.Index(name=ready_sl_index) + batch_size = 100 + num_batches = 10 + for _ in range(num_batches): + idx.upsert( + vectors=[ + {"id": random_string(15), "values": [random.random() for _ in range(dimension)]} + for _ in range(batch_size) + ], + namespace=ns, + ) + + poll_stats_for_namespace(idx=idx, namespace=ns, expected_count=batch_size * num_batches) + logger.debug("Sleeping for 180 seconds to ensure vectors are indexed") + time.sleep(180) + + index_stats = idx.describe_index_stats() + logger.debug(f"Index stats for index {ready_sl_index}: {index_stats}") + + backup_name = "backup-" + random_string(10) + backup = pc.db.backup.create(backup_name=backup_name, index_name=ready_sl_index) + assert backup.backup_id is not None + assert backup.name == backup_name + assert backup.source_index_name == ready_sl_index + + # Describe the backup + backup_desc = pc.db.backup.describe(backup_id=backup.backup_id) + assert backup_desc.name == backup_name + assert backup_desc.backup_id == backup.backup_id + assert backup_desc.source_index_name == ready_sl_index + logger.info(f"Backup description: {backup_desc}") + + # Wait for the backup to be ready before proceeding + backup_ready = False + max_wait = 60 + while not backup_ready: + backup_desc = pc.db.backup.describe(backup_id=backup.backup_id) + logger.info(f"Backup description: {backup_desc}") + if backup_desc.status == "Ready": + backup_ready = True + else: + if max_wait <= 0: + raise Exception("Backup did not become ready in time") + max_wait -= 5 + time.sleep(5) + + # 
Verify that the backup shows in list + backups_list = pc.db.backup.list(index_name=ready_sl_index) + assert len(backups_list) >= 1 + assert any(b.name == backup_name for b in backups_list) + assert any(b.backup_id == backup.backup_id for b in backups_list) + assert any(b.source_index_name == ready_sl_index for b in backups_list) + + # Create index from backup + new_index_name = "from-backup-" + random_string(10) + new_index = pc.db.index.create_from_backup( + name=new_index_name, backup_id=backup.backup_id, tags=index_tags + ) + assert new_index.name == new_index_name + assert new_index.tags is not None + assert new_index.dimension == desc.dimension + assert new_index.metric == desc.metric + + # Can list restore jobs + restore_jobs = pc.db.restore_job.list(index_name=new_index_name) + assert len(restore_jobs) == 1 + + # Verify that the new index has the same data as the original index + new_idx = pc.Index(name=new_index_name) + stats = new_idx.describe_index_stats() + logger.info(f"New index stats: {stats}") + assert stats.namespaces[ns].vector_count == batch_size * num_batches + + # Delete the new index + pc.db.index.delete(name=new_index_name) + + # Delete the backup + pc.db.backup.delete(backup_id=backup.backup_id) + + # Verify that the backup is deleted + with pytest.raises(Exception): + pc.db.backup.describe(backup_id=backup.backup_id) + + # Verify that the new index is deleted + backup_list = pc.db.backup.list() + assert len(backup_list) == 0 + + def test_create_backup_legacy_syntax(self, pc, ready_sl_index, index_tags): + desc = pc.describe_index(name=ready_sl_index) + dimension = desc.dimension + + # Upsert some sample data + ns = random_string(10) + idx = pc.Index(name=ready_sl_index) + batch_size = 100 + num_batches = 10 + for _ in range(num_batches): + idx.upsert( + vectors=[ + {"id": random_string(15), "values": [random.random() for _ in range(dimension)]} + for _ in range(batch_size) + ], + namespace=ns, + ) + + poll_stats_for_namespace(idx=idx, 
namespace=ns, expected_count=batch_size * num_batches) + logger.debug("Sleeping for 180 seconds to ensure vectors are indexed") + time.sleep(180) + + index_stats = idx.describe_index_stats() + logger.debug(f"Index stats for index {ready_sl_index}: {index_stats}") + + backup_name = "backup-" + random_string(10) + backup = pc.create_backup(backup_name=backup_name, index_name=ready_sl_index) + assert backup.backup_id is not None + assert backup.name == backup_name + assert backup.source_index_name == ready_sl_index + + # Describe the backup + backup_desc = pc.describe_backup(backup_id=backup.backup_id) + assert backup_desc.name == backup_name + assert backup_desc.backup_id == backup.backup_id + assert backup_desc.source_index_name == ready_sl_index + logger.info(f"Backup description: {backup_desc}") + + # Wait for the backup to be ready before proceeding + backup_ready = False + max_wait = 60 + while not backup_ready: + backup_desc = pc.describe_backup(backup_id=backup.backup_id) + logger.info(f"Backup description: {backup_desc}") + if backup_desc.status == "Ready": + backup_ready = True + else: + if max_wait <= 0: + raise Exception("Backup did not become ready in time") + max_wait -= 5 + time.sleep(5) + + # Verify that the backup shows in list + backups_list = pc.list_backups(index_name=ready_sl_index) + assert len(backups_list) >= 1 + assert any(b.name == backup_name for b in backups_list) + assert any(b.backup_id == backup.backup_id for b in backups_list) + assert any(b.source_index_name == ready_sl_index for b in backups_list) + + # Create index from backup + new_index_name = "from-backup-" + random_string(10) + new_index = pc.create_index_from_backup( + name=new_index_name, backup_id=backup.backup_id, tags=index_tags + ) + assert new_index.name == new_index_name + assert new_index.tags is not None + assert new_index.dimension == desc.dimension + assert new_index.metric == desc.metric + + # Can list restore jobs + restore_jobs = 
pc.list_restore_jobs(index_name=new_index_name) + assert len(restore_jobs) == 1 + + # Verify that the new index has the same data as the original index + new_idx = pc.Index(name=new_index_name) + stats = new_idx.describe_index_stats() + logger.info(f"New index stats: {stats}") + assert stats.namespaces[ns].vector_count == batch_size * num_batches + + # Delete the new index + pc.delete_index(name=new_index_name) + + # Delete the backup + pc.delete_backup(backup_id=backup.backup_id) + + # Verify that the backup is deleted + with pytest.raises(Exception): + pc.describe_backup(backup_id=backup.backup_id) + + # Verify that the new index is deleted + backup_list = pc.list_backups(index_name=ready_sl_index) + assert len(backup_list) == 0 diff --git a/tests/integration/control/restore_job/__init__.py b/tests/integration/control/restore_job/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/control/restore_job/conftest.py b/tests/integration/control/restore_job/conftest.py new file mode 100644 index 000000000..9798da273 --- /dev/null +++ b/tests/integration/control/restore_job/conftest.py @@ -0,0 +1,168 @@ +import pytest +import uuid +import time +import logging +import dotenv +from pinecone import Pinecone, NotFoundException, PineconeApiException +from ...helpers import generate_index_name, get_environment_var, index_tags as index_tags_helper + +dotenv.load_dotenv() + +logger = logging.getLogger(__name__) +""" @private """ + +# Generate a unique ID for the entire test run +RUN_ID = str(uuid.uuid4()) + + +@pytest.fixture() +def index_tags(request): + return index_tags_helper(request, RUN_ID) + + +@pytest.fixture() +def pc(): + api_key = get_environment_var("PINECONE_API_KEY") + return Pinecone( + api_key=api_key, additional_headers={"sdk-test-suite": "pinecone-python-client"} + ) + + +@pytest.fixture() +def serverless_cloud(): + return get_environment_var("SERVERLESS_CLOUD", "aws") + + +@pytest.fixture() +def serverless_region(): + 
return get_environment_var("SERVERLESS_REGION", "us-west-2") + + +@pytest.fixture() +def create_sl_index_params(index_name, serverless_cloud, serverless_region, index_tags): + spec = {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + return dict(name=index_name, dimension=10, metric="cosine", spec=spec, tags=index_tags) + + +@pytest.fixture() +def index_name(request): + test_name = request.node.name + return generate_index_name(test_name) + + +@pytest.fixture() +def ready_sl_index(pc, index_name, create_sl_index_params): + create_sl_index_params["timeout"] = None + pc.create_index(**create_sl_index_params) + yield index_name + pc.db.index.delete(name=index_name, timeout=-1) + + +def delete_with_retry(pc, index_name, retries=0, sleep_interval=5): + logger.debug( + "Deleting index " + + index_name + + ", retry " + + str(retries) + + ", next sleep interval " + + str(sleep_interval) + ) + try: + pc.db.index.delete(name=index_name, timeout=-1) + except NotFoundException: + pass + except PineconeApiException as e: + if e.error.code == "PRECONDITON_FAILED": + if retries > 5: + raise Exception("Unable to delete index " + index_name) + time.sleep(sleep_interval) + delete_with_retry(pc, index_name, retries + 1, sleep_interval * 2) + else: + logger.error(e.__class__) + logger.error(e) + raise Exception("Unable to delete index " + index_name) + except Exception as e: + logger.error(e.__class__) + logger.error(e) + raise Exception("Unable to delete index " + index_name) + + +@pytest.fixture(autouse=True) +def cleanup(pc, index_name): + yield + + try: + desc = pc.db.index.describe(name=index_name) + if desc.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {index_name}") + pc.db.index.configure(name=index_name, deletion_protection="disabled") + logger.debug("Attempting to delete index with name: " + index_name) + pc.db.index.delete(name=index_name, timeout=-1) + except Exception: + pass + + for backup in 
pc.db.backup.list(): + logger.debug(f"Deleting backup: {backup.name}") + try: + pc.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") + + +def pytest_sessionfinish(session, exitstatus): + """ + Hook that runs after all tests have completed. + This is a good place to clean up any resources that were created during the test session. + """ + logger.info("Running final cleanup after all tests...") + + try: + pc = Pinecone() + indexes = pc.db.index.list() + test_indexes = [ + idx for idx in indexes if idx.tags is not None and idx.tags.get("test-run") == RUN_ID + ] + + logger.info(f"Indexes to delete: {[idx.name for idx in test_indexes]}") + + for idx in test_indexes: + if idx.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {idx.name}") + pc.db.index.configure(name=idx.name, deletion_protection="disabled") + # Wait for index to be updated with status ready + logger.info(f"Waiting for index {idx.name} to be ready...") + timeout = 60 + while True and timeout > 0: + is_ready = pc.db.index.describe(name=idx.name).ready + if is_ready: + break + time.sleep(1) + timeout -= 1 + if timeout <= 0: + logger.warning(f"Index {idx.name} did not become ready in time") + else: + logger.info(f"Deletion protection is already disabled for index: {idx.name}") + + for idx in test_indexes: + try: + logger.info(f"Deleting index: {idx.name}") + pc.db.index.delete(name=idx.name, timeout=-1) + except Exception as e: + logger.warning(f"Failed to delete index {idx.name}: {str(e)}") + + backups = pc.db.backup.list() + if len(backups) > 0: + logger.info(f"Deleting {len(backups)} backups") + for backup in backups: + logger.debug(f"Deleting backup: {backup.name}") + try: + pc.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") + else: + logger.info("No backups to delete") + + except 
Exception as e: + logger.error(f"Error during final cleanup: {str(e)}") + + logger.info("Final cleanup completed") diff --git a/tests/integration/control/restore_job/test_describe.py b/tests/integration/control/restore_job/test_describe.py new file mode 100644 index 000000000..7b3809b90 --- /dev/null +++ b/tests/integration/control/restore_job/test_describe.py @@ -0,0 +1,38 @@ +import pytest +from pinecone import Pinecone, PineconeApiException +import logging +from datetime import datetime + +logger = logging.getLogger(__name__) + + +class TestRestoreJobDescribe: + def test_describe_restore_job(self, pc: Pinecone): + jobs = pc.db.restore_job.list() + assert len(jobs.data) >= 1 + + restore_job_id = jobs.data[0].restore_job_id + restore_job = pc.db.restore_job.describe(job_id=restore_job_id) + logger.debug(f"Restore job: {restore_job}") + + assert restore_job.restore_job_id == restore_job_id + assert restore_job.backup_id is not None + assert isinstance(restore_job.status, str) + assert isinstance(restore_job.backup_id, str) + assert isinstance(restore_job.completed_at, datetime) + assert isinstance(restore_job.created_at, datetime) + assert isinstance(restore_job.percent_complete, float) + assert isinstance(restore_job.target_index_id, str) + assert isinstance(restore_job.target_index_name, str) + + def test_describe_restore_job_legacy_syntax(self, pc: Pinecone): + jobs = pc.list_restore_jobs() + assert len(jobs.data) >= 1 + + restore_job_id = jobs.data[0].restore_job_id + restore_job = pc.describe_restore_job(job_id=restore_job_id) + logger.debug(f"Restore job: {restore_job}") + + def test_describe_restore_job_with_invalid_job_id(self, pc: Pinecone): + with pytest.raises(PineconeApiException): + pc.db.restore_job.describe(job_id="invalid") diff --git a/tests/integration/control/restore_job/test_list.py b/tests/integration/control/restore_job/test_list.py new file mode 100644 index 000000000..379b37dda --- /dev/null +++ 
b/tests/integration/control/restore_job/test_list.py @@ -0,0 +1,58 @@ +import pytest +import logging +from pinecone import Pinecone, PineconeApiValueError, PineconeApiException + +logger = logging.getLogger(__name__) + + +class TestRestoreJobList: + def test_list_restore_jobs_no_arguments(self, pc: Pinecone): + restore_jobs = pc.db.restore_job.list() + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + + # This assumes the backup test has been run at least once + # in the same project. + assert len(restore_jobs.data) >= 1 + + def test_list_restore_jobs_with_optional_arguments(self, pc: Pinecone): + restore_jobs = pc.db.restore_job.list(limit=2) + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + assert len(restore_jobs.data) <= 2 + + if len(restore_jobs.data) == 2: + logger.debug(f"Restore jobs pagination: {restore_jobs.pagination}") + assert restore_jobs.pagination is not None + assert restore_jobs.pagination.next is not None + + next_page = pc.db.restore_job.list( + limit=2, pagination_token=restore_jobs.pagination.next + ) + assert next_page.data is not None + assert len(next_page.data) <= 2 + + def test_list_restore_jobs_legacy_syntax(self, pc: Pinecone): + restore_jobs = pc.list_restore_jobs(limit=2) + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + assert len(restore_jobs.data) <= 2 + + if len(restore_jobs.data) == 2: + logger.debug(f"Restore jobs pagination: {restore_jobs.pagination}") + assert restore_jobs.pagination is not None + assert restore_jobs.pagination.next is not None + + next_page = pc.list_restore_jobs(limit=2, pagination_token=restore_jobs.pagination.next) + assert next_page.data is not None + assert len(next_page.data) <= 2 + + +class TestRestoreJobListErrors: + def test_list_restore_jobs_with_invalid_limit(self, pc: Pinecone): + with pytest.raises(PineconeApiValueError): + 
pc.db.restore_job.list(limit=-1) + + def test_list_restore_jobs_with_invalid_pagination_token(self, pc: Pinecone): + with pytest.raises(PineconeApiException): + pc.db.restore_job.list(pagination_token="invalid") diff --git a/tests/integration/control_asyncio/backup/__init__.py b/tests/integration/control_asyncio/backup/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/control_asyncio/backup/conftest.py b/tests/integration/control_asyncio/backup/conftest.py new file mode 100644 index 000000000..3a7a56078 --- /dev/null +++ b/tests/integration/control_asyncio/backup/conftest.py @@ -0,0 +1,220 @@ +import pytest +import time +import random +import asyncio +import uuid +from ...helpers import get_environment_var, generate_index_name, index_tags as index_tags_helper +import logging +from typing import Callable, Optional, Awaitable, Union + +from pinecone import ( + CloudProvider, + AwsRegion, + ServerlessSpec, + PineconeApiException, + NotFoundException, +) + +logger = logging.getLogger(__name__) +""" @private """ + +# Generate a unique ID for the entire test run +RUN_ID = str(uuid.uuid4()) + + +@pytest.fixture() +def index_tags(request): + return index_tags_helper(request, RUN_ID) + + +logger = logging.getLogger(__name__) + + +def build_client(): + from pinecone import PineconeAsyncio + + return PineconeAsyncio() + + +@pytest.fixture(scope="session") +def client(): + # This returns the sync client. Not for use in tests + # but can be used to help with cleanup after test runs + from pinecone import Pinecone + + return Pinecone() + + +async def poll_for_freshness(asyncio_idx, target_namespace, target_vector_count): + max_wait_time = 60 * 3 # 3 minutes + time_waited = 0 + wait_per_iteration = 5 + + while True: + stats = await asyncio_idx.describe_index_stats() + logger.debug( + "Polling for freshness on index %s. Current vector count: %s. 
Waiting for: %s", + asyncio_idx, + stats.total_vector_count, + target_vector_count, + ) + if target_namespace == "": + if stats.total_vector_count >= target_vector_count: + break + else: + if ( + target_namespace in stats.namespaces + and stats.namespaces[target_namespace].vector_count >= target_vector_count + ): + break + time_waited += wait_per_iteration + if time_waited >= max_wait_time: + raise TimeoutError( + "Timeout waiting for index to have expected vector count of {}".format( + target_vector_count + ) + ) + await asyncio.sleep(wait_per_iteration) + + return stats + + +async def wait_until( + condition: Union[Callable[[], bool], Callable[[], Awaitable[bool]]], + timeout: Optional[float] = 10.0, + interval: float = 0.1, +) -> None: + """ + Waits asynchronously until the given (async or sync) condition returns True or times out. + + Args: + condition: A callable that returns a boolean or an awaitable boolean, indicating if the wait is over. + timeout: Maximum time in seconds to wait for the condition to become True. If None, wait indefinitely. + interval: Time in seconds between checks of the condition. + + Raises: + asyncio.TimeoutError: If the condition is not met within the timeout period. + """ + start_time = asyncio.get_event_loop().time() + + while True: + result = await condition() if asyncio.iscoroutinefunction(condition) else condition() + if result: + return + + if timeout is not None and (asyncio.get_event_loop().time() - start_time) > timeout: + raise asyncio.TimeoutError("Condition not met within the timeout period.") + + remaining_time = ( + (start_time + timeout) - asyncio.get_event_loop().time() + if timeout is not None + else None + ) + logger.debug( + "Condition not met yet. Waiting for %.2f seconds. 
Timeout in %.2f seconds.", + interval, + remaining_time, + ) + await asyncio.sleep(interval) + + +@pytest.fixture() +def serverless_cloud(): + return get_environment_var("SERVERLESS_CLOUD", "aws") + + +@pytest.fixture() +def serverless_region(): + return get_environment_var("SERVERLESS_REGION", "us-west-2") + + +@pytest.fixture() +def spec1(serverless_cloud, serverless_region): + return {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + + +@pytest.fixture() +def spec2(): + return ServerlessSpec(cloud=CloudProvider.AWS, region=AwsRegion.US_EAST_1) + + +@pytest.fixture() +def spec3(): + return {"serverless": {"cloud": CloudProvider.AWS, "region": AwsRegion.US_EAST_1}} + + +@pytest.fixture() +def create_sl_index_params(index_name, serverless_cloud, serverless_region): + spec = {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + return dict(name=index_name, dimension=10, metric="cosine", spec=spec) + + +@pytest.fixture() +def random_vector(): + return [random.uniform(0, 1) for _ in range(10)] + + +@pytest.fixture() +def index_name(request): + test_name = request.node.name + return generate_index_name(test_name) + + +@pytest.fixture() +def ready_sl_index(client, index_name, create_sl_index_params): + create_sl_index_params["timeout"] = None + client.create_index(**create_sl_index_params) + yield index_name + client.delete_index(index_name, -1) + + +@pytest.fixture() +def notready_sl_index(client, index_name, create_sl_index_params): + client.create_index(**create_sl_index_params, timeout=-1) + yield index_name + + +def delete_with_retry(client, index_name, retries=0, sleep_interval=5): + logger.info( + f"Deleting index {index_name}, retry {retries}, next sleep interval {sleep_interval}" + ) + try: + client.delete_index(index_name, -1) + except NotFoundException: + pass + except PineconeApiException as e: + if e.error.code == "PRECONDITION_FAILED": + if retries > 5: + raise RuntimeError("Unable to delete index " + index_name) +
time.sleep(sleep_interval) + delete_with_retry(client, index_name, retries + 1, sleep_interval * 2) + else: + print(e.__class__) + print(e) + raise RuntimeError("Unable to delete index " + index_name) + except Exception as e: + logger.warning(f"Failed to delete index: {index_name}: {str(e)}") + raise RuntimeError("Unable to delete index " + index_name) + + +@pytest.fixture(autouse=True) +async def cleanup(client, index_name): + yield + + try: + desc = client.index.describe(name=index_name) + if desc.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {index_name}") + client.index.configure(name=index_name, deletion_protection="disabled") + logger.debug("Attempting to delete index with name: " + index_name) + client.index.delete(name=index_name, timeout=-1) + except Exception as e: + logger.warning(f"Failed to delete index: {index_name}: {str(e)}") + pass + + for backup in client.db.backup.list(): + logger.debug(f"Deleting backup: {backup.name}") + try: + client.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") diff --git a/tests/integration/control_asyncio/backup/test_backup.py b/tests/integration/control_asyncio/backup/test_backup.py new file mode 100644 index 000000000..65be3e8b8 --- /dev/null +++ b/tests/integration/control_asyncio/backup/test_backup.py @@ -0,0 +1,205 @@ +import pytest +import random +import asyncio +from ...helpers import random_string +import logging +from pinecone import PineconeAsyncio + +logger = logging.getLogger(__name__) + + +@pytest.mark.asyncio +class TestBackups: + async def test_create_backup(self, ready_sl_index, index_tags): + async with PineconeAsyncio() as pc: + desc = await pc.db.index.describe(name=ready_sl_index) + dimension = desc.dimension + + # Upsert some sample data + ns = random_string(10) + async with pc.IndexAsyncio(host=desc.host) as idx: + batch_size = 100 + num_batches = 10 + for _ in range(num_batches): + await 
idx.upsert( + vectors=[ + { + "id": random_string(15), + "values": [random.random() for _ in range(dimension)], + } + for _ in range(batch_size) + ], + namespace=ns, + ) + + logger.debug("Sleeping for 180 seconds to ensure vectors are indexed") + await asyncio.sleep(180) + + index_stats = await idx.describe_index_stats() + logger.debug(f"Index stats for index {ready_sl_index}: {index_stats}") + + backup_name = "backup-" + random_string(10) + backup = await pc.db.backup.create(backup_name=backup_name, index_name=ready_sl_index) + assert backup.backup_id is not None + assert backup.name == backup_name + assert backup.source_index_name == ready_sl_index + + # Describe the backup + backup_desc = await pc.db.backup.describe(backup_id=backup.backup_id) + assert backup_desc.name == backup_name + assert backup_desc.backup_id == backup.backup_id + assert backup_desc.source_index_name == ready_sl_index + logger.info(f"Backup description: {backup_desc}") + + # Wait for the backup to be ready before proceeding + backup_ready = False + max_wait = 60 + while not backup_ready: + backup_desc = await pc.db.backup.describe(backup_id=backup.backup_id) + logger.info(f"Backup description: {backup_desc}") + if backup_desc.status == "Ready": + backup_ready = True + else: + if max_wait <= 0: + raise Exception("Backup did not become ready in time") + max_wait -= 5 + await asyncio.sleep(5) + + # Verify that the backup shows in list + backups_list = await pc.db.backup.list(index_name=ready_sl_index) + assert len(backups_list) >= 1 + assert any(b.name == backup_name for b in backups_list) + assert any(b.backup_id == backup.backup_id for b in backups_list) + assert any(b.source_index_name == ready_sl_index for b in backups_list) + + # Create index from backup + new_index_name = "from-backup-" + random_string(10) + new_index = await pc.db.index.create_from_backup( + name=new_index_name, backup_id=backup.backup_id, tags=index_tags + ) + assert new_index.name == new_index_name + assert 
new_index.tags is not None + assert new_index.dimension == desc.dimension + assert new_index.metric == desc.metric + + # Can list restore jobs + restore_jobs = await pc.db.restore_job.list() + assert len(restore_jobs) >= 1 + + # Verify that the new index has the same data as the original index + new_desc = await pc.db.index.describe(name=new_index_name) + async with pc.IndexAsyncio(host=new_desc.host) as new_idx: + stats = await new_idx.describe_index_stats() + logger.info(f"New index stats: {stats}") + assert stats.namespaces[ns].vector_count == batch_size * num_batches + + # Delete the new index + await pc.db.index.delete(name=new_index_name) + + # Delete the backup + await pc.db.backup.delete(backup_id=backup.backup_id) + + # Verify that the backup is deleted + with pytest.raises(Exception): + await pc.db.backup.describe(backup_id=backup.backup_id) + + # Verify that the new index is deleted + backup_list = await pc.db.backup.list() + assert len(backup_list) == 0 + + async def test_create_backup_legacy_syntax(self, ready_sl_index, index_tags): + async with PineconeAsyncio() as pc: + desc = await pc.describe_index(name=ready_sl_index) + dimension = desc.dimension + + # Upsert some sample data + ns = random_string(10) + async with pc.IndexAsyncio(host=desc.host) as idx: + batch_size = 100 + num_batches = 10 + for _ in range(num_batches): + await idx.upsert( + vectors=[ + { + "id": random_string(15), + "values": [random.random() for _ in range(dimension)], + } + for _ in range(batch_size) + ], + namespace=ns, + ) + + logger.debug("Sleeping for 180 seconds to ensure vectors are indexed") + await asyncio.sleep(180) + + index_stats = await idx.describe_index_stats() + logger.debug(f"Index stats for index {ready_sl_index}: {index_stats}") + + backup_name = "backup-" + random_string(10) + backup = await pc.create_backup(backup_name=backup_name, index_name=ready_sl_index) + assert backup.backup_id is not None + assert backup.name == backup_name + assert 
backup.source_index_name == ready_sl_index + + # Describe the backup + backup_desc = await pc.describe_backup(backup_id=backup.backup_id) + assert backup_desc.name == backup_name + assert backup_desc.backup_id == backup.backup_id + assert backup_desc.source_index_name == ready_sl_index + logger.info(f"Backup description: {backup_desc}") + + # Wait for the backup to be ready before proceeding + backup_ready = False + max_wait = 60 + while not backup_ready: + backup_desc = await pc.describe_backup(backup_id=backup.backup_id) + logger.info(f"Backup description: {backup_desc}") + if backup_desc.status == "Ready": + backup_ready = True + else: + if max_wait <= 0: + raise Exception("Backup did not become ready in time") + max_wait -= 5 + await asyncio.sleep(5) + + # Verify that the backup shows in list + backups_list = await pc.list_backups(index_name=ready_sl_index) + assert len(backups_list) >= 1 + assert any(b.name == backup_name for b in backups_list) + assert any(b.backup_id == backup.backup_id for b in backups_list) + assert any(b.source_index_name == ready_sl_index for b in backups_list) + + # Create index from backup + new_index_name = "from-backup-" + random_string(10) + new_index = await pc.create_index_from_backup( + name=new_index_name, backup_id=backup.backup_id, tags=index_tags + ) + assert new_index.name == new_index_name + assert new_index.tags is not None + assert new_index.dimension == desc.dimension + assert new_index.metric == desc.metric + + # Can list restore jobs + restore_jobs = await pc.list_restore_jobs() + assert len(restore_jobs) >= 1 + + # Verify that the new index has the same data as the original index + new_desc = await pc.db.index.describe(name=new_index_name) + async with pc.IndexAsyncio(host=new_desc.host) as new_idx: + stats = await new_idx.describe_index_stats() + logger.info(f"New index stats: {stats}") + assert stats.namespaces[ns].vector_count == batch_size * num_batches + + # Delete the new index + await 
pc.delete_index(name=new_index_name) + + # Delete the backup + await pc.delete_backup(backup_id=backup.backup_id) + + # Verify that the backup is deleted + with pytest.raises(Exception): + await pc.describe_backup(backup_id=backup.backup_id) + + # Verify that the new index is deleted + backup_list = await pc.list_backups(index_name=ready_sl_index) + assert len(backup_list) == 0 diff --git a/tests/integration/control_asyncio/restore_job/__init__.py b/tests/integration/control_asyncio/restore_job/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/control_asyncio/restore_job/conftest.py b/tests/integration/control_asyncio/restore_job/conftest.py new file mode 100644 index 000000000..3a7a56078 --- /dev/null +++ b/tests/integration/control_asyncio/restore_job/conftest.py @@ -0,0 +1,220 @@ +import pytest +import time +import random +import asyncio +import uuid +from ...helpers import get_environment_var, generate_index_name, index_tags as index_tags_helper +import logging +from typing import Callable, Optional, Awaitable, Union + +from pinecone import ( + CloudProvider, + AwsRegion, + ServerlessSpec, + PineconeApiException, + NotFoundException, +) + +logger = logging.getLogger(__name__) +""" @private """ + +# Generate a unique ID for the entire test run +RUN_ID = str(uuid.uuid4()) + + +@pytest.fixture() +def index_tags(request): + return index_tags_helper(request, RUN_ID) + + +logger = logging.getLogger(__name__) + + +def build_client(): + from pinecone import PineconeAsyncio + + return PineconeAsyncio() + + +@pytest.fixture(scope="session") +def client(): + # This returns the sync client. 
Not for use in tests + # but can be used to help with cleanup after test runs + from pinecone import Pinecone + + return Pinecone() + + +async def poll_for_freshness(asyncio_idx, target_namespace, target_vector_count): + max_wait_time = 60 * 3 # 3 minutes + time_waited = 0 + wait_per_iteration = 5 + + while True: + stats = await asyncio_idx.describe_index_stats() + logger.debug( + "Polling for freshness on index %s. Current vector count: %s. Waiting for: %s", + asyncio_idx, + stats.total_vector_count, + target_vector_count, + ) + if target_namespace == "": + if stats.total_vector_count >= target_vector_count: + break + else: + if ( + target_namespace in stats.namespaces + and stats.namespaces[target_namespace].vector_count >= target_vector_count + ): + break + time_waited += wait_per_iteration + if time_waited >= max_wait_time: + raise TimeoutError( + "Timeout waiting for index to have expected vector count of {}".format( + target_vector_count + ) + ) + await asyncio.sleep(wait_per_iteration) + + return stats + + +async def wait_until( + condition: Union[Callable[[], bool], Callable[[], Awaitable[bool]]], + timeout: Optional[float] = 10.0, + interval: float = 0.1, +) -> None: + """ + Waits asynchronously until the given (async or sync) condition returns True or times out. + + Args: + condition: A callable that returns a boolean or an awaitable boolean, indicating if the wait is over. + timeout: Maximum time in seconds to wait for the condition to become True. If None, wait indefinitely. + interval: Time in seconds between checks of the condition. + + Raises: + asyncio.TimeoutError: If the condition is not met within the timeout period. 
+ """ + start_time = asyncio.get_event_loop().time() + + while True: + result = await condition() if asyncio.iscoroutinefunction(condition) else condition() + if result: + return + + if timeout is not None and (asyncio.get_event_loop().time() - start_time) > timeout: + raise asyncio.TimeoutError("Condition not met within the timeout period.") + + remaining_time = ( + (start_time + timeout) - asyncio.get_event_loop().time() + if timeout is not None + else None + ) + logger.debug( + "Condition not met yet. Waiting for %.2f seconds. Timeout in %.2f seconds.", + interval, + remaining_time, + ) + await asyncio.sleep(interval) + + +@pytest.fixture() +def serverless_cloud(): + return get_environment_var("SERVERLESS_CLOUD", "aws") + + +@pytest.fixture() +def serverless_region(): + return get_environment_var("SERVERLESS_REGION", "us-west-2") + + +@pytest.fixture() +def spec1(serverless_cloud, serverless_region): + return {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + + +@pytest.fixture() +def spec2(): + return ServerlessSpec(cloud=CloudProvider.AWS, region=AwsRegion.US_EAST_1) + + +@pytest.fixture() +def spec3(): + return {"serverless": {"cloud": CloudProvider.AWS, "region": AwsRegion.US_EAST_1}} + + +@pytest.fixture() +def create_sl_index_params(index_name, serverless_cloud, serverless_region): + spec = {"serverless": {"cloud": serverless_cloud, "region": serverless_region}} + return dict(name=index_name, dimension=10, metric="cosine", spec=spec) + + +@pytest.fixture() +def random_vector(): + return [random.uniform(0, 1) for _ in range(10)] + + +@pytest.fixture() +def index_name(request): + test_name = request.node.name + return generate_index_name(test_name) + + +@pytest.fixture() +def ready_sl_index(client, index_name, create_sl_index_params): + create_sl_index_params["timeout"] = None + client.create_index(**create_sl_index_params) + yield index_name + client.delete_index(index_name, -1) + + +@pytest.fixture() +def notready_sl_index(client, 
index_name, create_sl_index_params): + client.create_index(**create_sl_index_params, timeout=-1) + yield index_name + + +def delete_with_retry(client, index_name, retries=0, sleep_interval=5): + logger.info( + f"Deleting index {index_name}, retry {retries}, next sleep interval {sleep_interval}" + ) + try: + client.delete_index(index_name, -1) + except NotFoundException: + pass + except PineconeApiException as e: + if e.error.code == "PRECONDITION_FAILED": + if retries > 5: + raise RuntimeError("Unable to delete index " + index_name) + time.sleep(sleep_interval) + delete_with_retry(client, index_name, retries + 1, sleep_interval * 2) + else: + print(e.__class__) + print(e) + raise RuntimeError("Unable to delete index " + index_name) + except Exception as e: + logger.warning(f"Failed to delete index: {index_name}: {str(e)}") + raise RuntimeError("Unable to delete index " + index_name) + + +@pytest.fixture(autouse=True) +async def cleanup(client, index_name): + yield + + try: + desc = client.index.describe(name=index_name) + if desc.deletion_protection == "enabled": + logger.info(f"Disabling deletion protection for index: {index_name}") + client.index.configure(name=index_name, deletion_protection="disabled") + logger.debug("Attempting to delete index with name: " + index_name) + client.index.delete(name=index_name, timeout=-1) + except Exception as e: + logger.warning(f"Failed to delete index: {index_name}: {str(e)}") + pass + + for backup in client.db.backup.list(): + logger.debug(f"Deleting backup: {backup.name}") + try: + client.db.backup.delete(backup_id=backup.backup_id) + except Exception as e: + logger.warning(f"Failed to delete backup: {backup.name}: {str(e)}") diff --git a/tests/integration/control_asyncio/restore_job/test_describe.py b/tests/integration/control_asyncio/restore_job/test_describe.py new file mode 100644 index 000000000..d32595cfd --- /dev/null +++ b/tests/integration/control_asyncio/restore_job/test_describe.py @@ -0,0 +1,42 @@ +import pytest +from pinecone import PineconeAsyncio, 
PineconeApiException +import logging +from datetime import datetime + +logger = logging.getLogger(__name__) + + +@pytest.mark.asyncio +class TestRestoreJobDescribe: + async def test_describe_restore_job(self): + async with PineconeAsyncio() as pc: + jobs = await pc.db.restore_job.list() + assert len(jobs.data) >= 1 + + restore_job_id = jobs.data[0].restore_job_id + restore_job = await pc.db.restore_job.describe(job_id=restore_job_id) + logger.debug(f"Restore job: {restore_job}") + + assert restore_job.restore_job_id == restore_job_id + assert restore_job.backup_id is not None + assert isinstance(restore_job.status, str) + assert isinstance(restore_job.backup_id, str) + assert isinstance(restore_job.completed_at, datetime) + assert isinstance(restore_job.created_at, datetime) + assert isinstance(restore_job.percent_complete, float) + assert isinstance(restore_job.target_index_id, str) + assert isinstance(restore_job.target_index_name, str) + + async def test_describe_restore_job_legacy_syntax(self): + async with PineconeAsyncio() as pc: + jobs = await pc.list_restore_jobs() + assert len(jobs.data) >= 1 + + restore_job_id = jobs.data[0].restore_job_id + restore_job = await pc.describe_restore_job(job_id=restore_job_id) + logger.debug(f"Restore job: {restore_job}") + + async def test_describe_restore_job_with_invalid_job_id(self): + async with PineconeAsyncio() as pc: + with pytest.raises(PineconeApiException): + await pc.db.restore_job.describe(job_id="invalid") diff --git a/tests/integration/control_asyncio/restore_job/test_list.py b/tests/integration/control_asyncio/restore_job/test_list.py new file mode 100644 index 000000000..0e0814da4 --- /dev/null +++ b/tests/integration/control_asyncio/restore_job/test_list.py @@ -0,0 +1,67 @@ +import pytest +import logging +from pinecone import PineconeAsyncio, PineconeApiValueError, PineconeApiException + +logger = logging.getLogger(__name__) + + +@pytest.mark.asyncio +class TestRestoreJobList: + async def 
test_list_restore_jobs_no_arguments(self): + async with PineconeAsyncio() as pc: + restore_jobs = await pc.db.restore_job.list() + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + + # This assumes the backup test has been run at least once + # in the same project. + assert len(restore_jobs.data) >= 1 + + async def test_list_restore_jobs_with_optional_arguments(self): + async with PineconeAsyncio() as pc: + restore_jobs = await pc.db.restore_job.list(limit=2) + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + assert len(restore_jobs.data) <= 2 + + if len(restore_jobs.data) == 2: + logger.debug(f"Restore jobs pagination: {restore_jobs.pagination}") + assert restore_jobs.pagination is not None + assert restore_jobs.pagination.next is not None + + next_page = await pc.db.restore_job.list( + limit=2, pagination_token=restore_jobs.pagination.next + ) + assert next_page.data is not None + assert len(next_page.data) <= 2 + + async def test_list_restore_jobs_legacy_syntax(self): + async with PineconeAsyncio() as pc: + restore_jobs = await pc.list_restore_jobs(limit=2) + assert restore_jobs.data is not None + logger.debug(f"Restore jobs count: {len(restore_jobs.data)}") + assert len(restore_jobs.data) <= 2 + + if len(restore_jobs.data) == 2: + logger.debug(f"Restore jobs pagination: {restore_jobs.pagination}") + assert restore_jobs.pagination is not None + assert restore_jobs.pagination.next is not None + + next_page = await pc.list_restore_jobs( + limit=2, pagination_token=restore_jobs.pagination.next + ) + assert next_page.data is not None + assert len(next_page.data) <= 2 + + +@pytest.mark.asyncio +class TestRestoreJobListErrors: + async def test_list_restore_jobs_with_invalid_limit(self): + async with PineconeAsyncio() as pc: + with pytest.raises(PineconeApiValueError): + await pc.db.restore_job.list(limit=-1) + + async def 
test_list_restore_jobs_with_invalid_pagination_token(self): + async with PineconeAsyncio() as pc: + with pytest.raises(PineconeApiException): + await pc.db.restore_job.list(pagination_token="invalid") diff --git a/tests/integration/helpers/helpers.py b/tests/integration/helpers/helpers.py index d9990df46..4dbe7d22a 100644 --- a/tests/integration/helpers/helpers.py +++ b/tests/integration/helpers/helpers.py @@ -91,6 +91,9 @@ def poll_stats_for_namespace( raise TimeoutError(f"Timed out waiting for namespace {namespace} to have vectors") else: total_time += delta_t + logger.debug( + f"Found {stats}. Waiting for {expected_count} vectors in namespace {namespace}." + ) time.sleep(delta_t) From 0a006187408807991976d653821499d06d970e33 Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Wed, 14 May 2025 00:55:55 -0400 Subject: [PATCH 4/7] Adjust test CI configuration --- .github/workflows/testing-integration.yaml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/testing-integration.yaml b/.github/workflows/testing-integration.yaml index 6756c7796..be5e6bf30 100644 --- a/.github/workflows/testing-integration.yaml +++ b/.github/workflows/testing-integration.yaml @@ -13,6 +13,14 @@ jobs: strategy: matrix: python_version: [3.9, 3.12] + test_suite: + - tests/integration/control/index + - tests/integration/control/collections + - tests/integration/control/backup + - tests/integration/control/restore_job + - tests/integration/control_asyncio/index + - tests/integration/control_asyncio/backup + - tests/integration/control_asyncio/restore_job steps: - uses: actions/checkout@v4 - name: 'Set up Python ${{ matrix.python_version }}' @@ -23,13 +31,8 @@ jobs: uses: ./.github/actions/setup-poetry with: include_asyncio: true - - name: 'Run index tests' - run: poetry run pytest tests/integration/control/index --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG - - name: 'Run collection tests' - run: poetry run pytest 
tests/integration/control/collections --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG - - name: 'Run backup & restore tests' - run: poetry run pytest tests/integration/control/backup --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG - + - name: 'Run tests' + run: poetry run pytest ${{ matrix.test_suite }} --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG inference: name: Inference tests From 41fc77f5bc850cb7dcb1bf20a5c359f87718badf Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Wed, 14 May 2025 01:10:28 -0400 Subject: [PATCH 5/7] Regenerate from spec updates --- codegen/apis | 2 +- .../db_control/api/manage_indexes_api.py | 54 ++++++++++++------- .../db_data/api/namespace_operations_api.py | 18 +++---- .../db_control/resources/asyncio/backup.py | 3 +- .../db_control/resources/asyncio/index.py | 18 +++---- pinecone/db_control/resources/sync/backup.py | 3 +- pinecone/db_control/resources/sync/index.py | 2 +- pinecone/openapi_support/api_version.py | 2 +- 8 files changed, 60 insertions(+), 42 deletions(-) diff --git a/codegen/apis b/codegen/apis index 4b1c83b3b..09015d910 160000 --- a/codegen/apis +++ b/codegen/apis @@ -1 +1 @@ -Subproject commit 4b1c83b3b6669e6596151a575c284ee2cf4977a7 +Subproject commit 09015d9106f2578e473f45f55120aafc5c559f2a diff --git a/pinecone/core/openapi/db_control/api/manage_indexes_api.py b/pinecone/core/openapi/db_control/api/manage_indexes_api.py index 50d6f5931..ae478017f 100644 --- a/pinecone/core/openapi/db_control/api/manage_indexes_api.py +++ b/pinecone/core/openapi/db_control/api/manage_indexes_api.py @@ -421,7 +421,7 @@ def __create_index_for_model( callable=__create_index_for_model, ) - def __create_index_from_backup( + def __create_index_from_backup_operation( self, backup_id, create_index_from_backup_request, **kwargs: ExtraOpenApiKwargsTypedDict ): """Create an index from a backup # noqa: E501 @@ -430,7 +430,7 @@ def __create_index_from_backup( This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_index_from_backup(backup_id, create_index_from_backup_request, async_req=True) + >>> thread = api.create_index_from_backup_operation(backup_id, create_index_from_backup_request, async_req=True) >>> result = thread.get() Args: @@ -465,12 +465,12 @@ def __create_index_from_backup( kwargs["create_index_from_backup_request"] = create_index_from_backup_request return self.call_with_http_info(**kwargs) - self.create_index_from_backup = _Endpoint( + self.create_index_from_backup_operation = _Endpoint( settings={ "response_type": (CreateIndexFromBackupResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/backups/{backup_id}/create-index", - "operation_id": "create_index_from_backup", + "operation_id": "create_index_from_backup_operation", "http_method": "POST", "servers": None, }, @@ -494,7 +494,7 @@ def __create_index_from_backup( }, headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, api_client=api_client, - callable=__create_index_from_backup, + callable=__create_index_from_backup_operation, ) def __delete_backup(self, backup_id, **kwargs: ExtraOpenApiKwargsTypedDict): @@ -1195,6 +1195,8 @@ def __list_project_backups(self, **kwargs: ExtraOpenApiKwargsTypedDict): Keyword Args: + limit (int): The number of results to return per page. [optional] if omitted the server will use the default value of 10. + pagination_token (str): The token to use to retrieve the next page of results. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. 
_preload_content (bool): if False, the urllib3.HTTPResponse object @@ -1229,13 +1231,19 @@ def __list_project_backups(self, **kwargs: ExtraOpenApiKwargsTypedDict): "http_method": "GET", "servers": None, }, - params_map={"all": [], "required": [], "nullable": [], "enum": [], "validation": []}, + params_map={ + "all": ["limit", "pagination_token"], + "required": [], + "nullable": [], + "enum": [], + "validation": ["limit"], + }, root_map={ - "validations": {}, + "validations": {("limit",): {"inclusive_maximum": 100, "inclusive_minimum": 1}}, "allowed_values": {}, - "openapi_types": {}, - "attribute_map": {}, - "location_map": {}, + "openapi_types": {"limit": (int,), "pagination_token": (str,)}, + "attribute_map": {"limit": "limit", "pagination_token": "paginationToken"}, + "location_map": {"limit": "query", "pagination_token": "query"}, "collection_format_map": {}, }, headers_map={"accept": ["application/json"], "content_type": []}, @@ -1643,7 +1651,7 @@ async def __create_index_for_model(self, create_index_for_model_request, **kwarg callable=__create_index_for_model, ) - async def __create_index_from_backup( + async def __create_index_from_backup_operation( self, backup_id, create_index_from_backup_request, **kwargs ): """Create an index from a backup # noqa: E501 @@ -1680,12 +1688,12 @@ async def __create_index_from_backup( kwargs["create_index_from_backup_request"] = create_index_from_backup_request return await self.call_with_http_info(**kwargs) - self.create_index_from_backup = _AsyncioEndpoint( + self.create_index_from_backup_operation = _AsyncioEndpoint( settings={ "response_type": (CreateIndexFromBackupResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/backups/{backup_id}/create-index", - "operation_id": "create_index_from_backup", + "operation_id": "create_index_from_backup_operation", "http_method": "POST", "servers": None, }, @@ -1709,7 +1717,7 @@ async def __create_index_from_backup( }, headers_map={"accept": ["application/json"], "content_type": 
["application/json"]}, api_client=api_client, - callable=__create_index_from_backup, + callable=__create_index_from_backup_operation, ) async def __delete_backup(self, backup_id, **kwargs): @@ -2336,6 +2344,8 @@ async def __list_project_backups(self, **kwargs): Keyword Args: + limit (int): The number of results to return per page. [optional] if omitted the server will use the default value of 10. + pagination_token (str): The token to use to retrieve the next page of results. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -2367,13 +2377,19 @@ async def __list_project_backups(self, **kwargs): "http_method": "GET", "servers": None, }, - params_map={"all": [], "required": [], "nullable": [], "enum": [], "validation": []}, + params_map={ + "all": ["limit", "pagination_token"], + "required": [], + "nullable": [], + "enum": [], + "validation": ["limit"], + }, root_map={ - "validations": {}, + "validations": {("limit",): {"inclusive_maximum": 100, "inclusive_minimum": 1}}, "allowed_values": {}, - "openapi_types": {}, - "attribute_map": {}, - "location_map": {}, + "openapi_types": {"limit": (int,), "pagination_token": (str,)}, + "attribute_map": {"limit": "limit", "pagination_token": "paginationToken"}, + "location_map": {"limit": "query", "pagination_token": "query"}, "collection_format_map": {}, }, headers_map={"accept": ["application/json"], "content_type": []}, diff --git a/pinecone/core/openapi/db_data/api/namespace_operations_api.py b/pinecone/core/openapi/db_data/api/namespace_operations_api.py index 986efbb21..e28e7430a 100644 --- a/pinecone/core/openapi/db_data/api/namespace_operations_api.py +++ b/pinecone/core/openapi/db_data/api/namespace_operations_api.py @@ -177,14 +177,14 @@ def __describe_namespace(self, namespace, **kwargs: ExtraOpenApiKwargsTypedDict) callable=__describe_namespace, ) - def __list_namespaces(self, **kwargs: 
ExtraOpenApiKwargsTypedDict): + def __list_namespaces_operation(self, **kwargs: ExtraOpenApiKwargsTypedDict): """Get list of all namespaces # noqa: E501 Get a list of all namespaces within an index. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.list_namespaces(async_req=True) + >>> thread = api.list_namespaces_operation(async_req=True) >>> result = thread.get() @@ -216,12 +216,12 @@ def __list_namespaces(self, **kwargs: ExtraOpenApiKwargsTypedDict): kwargs = self._process_openapi_kwargs(kwargs) return self.call_with_http_info(**kwargs) - self.list_namespaces = _Endpoint( + self.list_namespaces_operation = _Endpoint( settings={ "response_type": (ListNamespacesResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/namespaces", - "operation_id": "list_namespaces", + "operation_id": "list_namespaces_operation", "http_method": "GET", "servers": None, }, @@ -242,7 +242,7 @@ def __list_namespaces(self, **kwargs: ExtraOpenApiKwargsTypedDict): }, headers_map={"accept": ["application/json"], "content_type": []}, api_client=api_client, - callable=__list_namespaces, + callable=__list_namespaces_operation, ) @@ -381,7 +381,7 @@ async def __describe_namespace(self, namespace, **kwargs): callable=__describe_namespace, ) - async def __list_namespaces(self, **kwargs): + async def __list_namespaces_operation(self, **kwargs): """Get list of all namespaces # noqa: E501 Get a list of all namespaces within an index. 
# noqa: E501 @@ -413,12 +413,12 @@ async def __list_namespaces(self, **kwargs): self._process_openapi_kwargs(kwargs) return await self.call_with_http_info(**kwargs) - self.list_namespaces = _AsyncioEndpoint( + self.list_namespaces_operation = _AsyncioEndpoint( settings={ "response_type": (ListNamespacesResponse,), "auth": ["ApiKeyAuth"], "endpoint_path": "/namespaces", - "operation_id": "list_namespaces", + "operation_id": "list_namespaces_operation", "http_method": "GET", "servers": None, }, @@ -439,5 +439,5 @@ async def __list_namespaces(self, **kwargs): }, headers_map={"accept": ["application/json"], "content_type": []}, api_client=api_client, - callable=__list_namespaces, + callable=__list_namespaces_operation, ) diff --git a/pinecone/db_control/resources/asyncio/backup.py b/pinecone/db_control/resources/asyncio/backup.py index cb7928980..391da1e1f 100644 --- a/pinecone/db_control/resources/asyncio/backup.py +++ b/pinecone/db_control/resources/asyncio/backup.py @@ -38,7 +38,8 @@ async def list( result = await self._index_api.list_index_backups(**args) return BackupList(result) else: - result = await self._index_api.list_project_backups() + args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) + result = await self._index_api.list_project_backups(**args) return BackupList(result) @require_kwargs diff --git a/pinecone/db_control/resources/asyncio/index.py b/pinecone/db_control/resources/asyncio/index.py index 9694b3d2f..b48ff99cb 100644 --- a/pinecone/db_control/resources/asyncio/index.py +++ b/pinecone/db_control/resources/asyncio/index.py @@ -27,8 +27,8 @@ class IndexResourceAsyncio: def __init__(self, index_api, config): - self.index_api = index_api - self.config = config + self._index_api = index_api + self._config = config async def create( self, @@ -50,7 +50,7 @@ async def create( vector_type=vector_type, tags=tags, ) - resp = await self.index_api.create_index(create_index_request=req) + resp = await 
self._index_api.create_index(create_index_request=req) if timeout == -1: return IndexModel(resp) @@ -74,7 +74,7 @@ async def create_for_model( tags=tags, deletion_protection=deletion_protection, ) - resp = await self.index_api.create_index_for_model(req) + resp = await self._index_api.create_index_for_model(req) if timeout == -1: return IndexModel(resp) @@ -91,7 +91,7 @@ async def create_from_backup( req = PineconeDBControlRequestFactory.create_index_from_backup_request( name=name, deletion_protection=deletion_protection, tags=tags ) - await self.index_api.create_index_from_backup( + await self._index_api.create_index_from_backup_operation( backup_id=backup_id, create_index_from_backup_request=req ) return await self.__poll_describe_index_until_ready(name, timeout) @@ -135,7 +135,7 @@ async def is_ready() -> bool: return description async def delete(self, name: str, timeout: Optional[int] = None): - await self.index_api.delete_index(name) + await self._index_api.delete_index(name) if timeout == -1: return @@ -157,11 +157,11 @@ async def delete(self, name: str, timeout: Optional[int] = None): ) async def list(self) -> IndexList: - response = await self.index_api.list_indexes() + response = await self._index_api.list_indexes() return IndexList(response) async def describe(self, name: str) -> IndexModel: - description = await self.index_api.describe_index(name) + description = await self._index_api.describe_index(name) return IndexModel(description) async def has(self, name: str) -> bool: @@ -188,4 +188,4 @@ async def configure( deletion_protection=deletion_protection, tags=tags, ) - await self.index_api.configure_index(name, configure_index_request=req) + await self._index_api.configure_index(name, configure_index_request=req) diff --git a/pinecone/db_control/resources/sync/backup.py b/pinecone/db_control/resources/sync/backup.py index e5e7273f4..123b33fbf 100644 --- a/pinecone/db_control/resources/sync/backup.py +++ b/pinecone/db_control/resources/sync/backup.py @@ 
-37,7 +37,8 @@ def list( ) return BackupList(self._index_api.list_index_backups(**args)) else: - return BackupList(self._index_api.list_project_backups()) + args = parse_non_empty_args([("limit", limit), ("pagination_token", pagination_token)]) + return BackupList(self._index_api.list_project_backups(**args)) @require_kwargs def create(self, *, index_name: str, backup_name: str, description: str = "") -> BackupModel: diff --git a/pinecone/db_control/resources/sync/index.py b/pinecone/db_control/resources/sync/index.py index 26dabe24e..d5e7d6e2d 100644 --- a/pinecone/db_control/resources/sync/index.py +++ b/pinecone/db_control/resources/sync/index.py @@ -112,7 +112,7 @@ def create_from_backup( req = PineconeDBControlRequestFactory.create_index_from_backup_request( name=name, deletion_protection=deletion_protection, tags=tags ) - resp = self._index_api.create_index_from_backup( + resp = self._index_api.create_index_from_backup_operation( backup_id=backup_id, create_index_from_backup_request=req ) logger.info(f"Creating index from backup. Response: {resp}") diff --git a/pinecone/openapi_support/api_version.py b/pinecone/openapi_support/api_version.py index 8725b9f27..de57ca386 100644 --- a/pinecone/openapi_support/api_version.py +++ b/pinecone/openapi_support/api_version.py @@ -2,4 +2,4 @@ # Do not edit this file manually. 
API_VERSION = "2025-04" -APIS_REPO_SHA = "ba143abc7449abfcf0b6635f1aabff2400dac762" +APIS_REPO_SHA = "4b1c83b3b6669e6596151a575c284ee2cf4977a7" From bcab53c3451c71d2ab831e68f33881c899120016 Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Wed, 14 May 2025 01:15:01 -0400 Subject: [PATCH 6/7] Do not fail fast --- .github/workflows/testing-integration.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/testing-integration.yaml b/.github/workflows/testing-integration.yaml index be5e6bf30..a90dbed56 100644 --- a/.github/workflows/testing-integration.yaml +++ b/.github/workflows/testing-integration.yaml @@ -11,6 +11,7 @@ jobs: PINECONE_API_KEY: '${{ secrets.PINECONE_API_KEY }}' PINECONE_ADDITIONAL_HEADERS: '{"sdk-test-suite": "pinecone-python-client"}' strategy: + fail-fast: false matrix: python_version: [3.9, 3.12] test_suite: From 01e80631e947834110d32cbdcde574f7c1a89ee2 Mon Sep 17 00:00:00 2001 From: Jen Hamon Date: Wed, 14 May 2025 02:21:17 -0400 Subject: [PATCH 7/7] Extend timeout for test backup creation --- .github/workflows/testing-integration.yaml | 2 +- tests/integration/control/backup/test_backup.py | 4 ++-- tests/integration/control_asyncio/backup/test_backup.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/testing-integration.yaml b/.github/workflows/testing-integration.yaml index a90dbed56..fa153f7e2 100644 --- a/.github/workflows/testing-integration.yaml +++ b/.github/workflows/testing-integration.yaml @@ -33,7 +33,7 @@ jobs: with: include_asyncio: true - name: 'Run tests' - run: poetry run pytest ${{ matrix.test_suite }} --retries 5 --retry-delay 35 -s -vv --log-cli-level=DEBUG + run: poetry run pytest ${{ matrix.test_suite }} --retries 2 --retry-delay 35 -s -vv --log-cli-level=DEBUG inference: name: Inference tests diff --git a/tests/integration/control/backup/test_backup.py b/tests/integration/control/backup/test_backup.py index 2226928f8..6873c414b 100644 --- 
a/tests/integration/control/backup/test_backup.py +++ b/tests/integration/control/backup/test_backup.py @@ -48,7 +48,7 @@ def test_create_backup(self, pc, ready_sl_index, index_tags): # Wait for the backup to be ready before proceeding backup_ready = False - max_wait = 60 + max_wait = 5 * 60 while not backup_ready: backup_desc = pc.db.backup.describe(backup_id=backup.backup_id) logger.info(f"Backup description: {backup_desc}") @@ -141,7 +141,7 @@ def test_create_backup_legacy_syntax(self, pc, ready_sl_index, index_tags): # Wait for the backup to be ready before proceeding backup_ready = False - max_wait = 60 + max_wait = 5 * 60 while not backup_ready: backup_desc = pc.describe_backup(backup_id=backup.backup_id) logger.info(f"Backup description: {backup_desc}") diff --git a/tests/integration/control_asyncio/backup/test_backup.py b/tests/integration/control_asyncio/backup/test_backup.py index 65be3e8b8..47a67c546 100644 --- a/tests/integration/control_asyncio/backup/test_backup.py +++ b/tests/integration/control_asyncio/backup/test_backup.py @@ -53,7 +53,7 @@ async def test_create_backup(self, ready_sl_index, index_tags): # Wait for the backup to be ready before proceeding backup_ready = False - max_wait = 60 + max_wait = 5 * 60 while not backup_ready: backup_desc = await pc.db.backup.describe(backup_id=backup.backup_id) logger.info(f"Backup description: {backup_desc}") @@ -150,7 +150,7 @@ async def test_create_backup_legacy_syntax(self, ready_sl_index, index_tags): # Wait for the backup to be ready before proceeding backup_ready = False - max_wait = 60 + max_wait = 5 * 60 while not backup_ready: backup_desc = await pc.describe_backup(backup_id=backup.backup_id) logger.info(f"Backup description: {backup_desc}")