From d3aecc0703ec0b6413ee25f6aa8813f73620182e Mon Sep 17 00:00:00 2001
From: Joan Fontanals
Date: Thu, 29 Feb 2024 16:43:35 +0100
Subject: [PATCH] chore: blackify (#6146)

Co-authored-by: Jina Dev Bot
---
 jina/__init__.py | 2 +-
 jina/clients/__init__.py | 1 +
 jina/clients/base/__init__.py | 12 +-
 jina/clients/base/grpc.py | 8 +-
 jina/clients/base/retry.py | 35 +-
 jina/clients/base/websocket.py | 7 +-
 jina/clients/mixin.py | 2 +
 jina/clients/request/helper.py | 1 +
 jina/excepts.py | 1 +
 jina/exporter.py | 9 +-
 jina/helper.py | 1 +
 jina/importer.py | 23 +-
 jina/jaml/__init__.py | 12 +-
 jina/orchestrate/deployments/__init__.py | 16 +-
 .../deployments/config/docker_compose.py | 20 +-
 jina/orchestrate/deployments/config/helper.py | 41 +-
 jina/orchestrate/deployments/config/k8s.py | 66 +-
 .../config/k8slib/kubernetes_deployment.py | 55 +-
 .../config/k8slib/kubernetes_tools.py | 9 +-
 .../install_requirements_helper.py | 18 +-
 jina/orchestrate/flow/base.py | 24 +-
 jina/orchestrate/pods/__init__.py | 49 +-
 jina/orchestrate/pods/container.py | 73 +-
 jina/orchestrate/pods/factory.py | 4 +-
 jina/parsers/__init__.py | 24 +-
 jina/parsers/base.py | 1 +
 jina/parsers/create.py | 7 +-
 jina/parsers/export.py | 5 +-
 jina/parsers/helper.py | 7 +-
 jina/parsers/orchestrate/base.py | 29 +-
 jina/parsers/orchestrate/deployment.py | 9 +-
 jina/parsers/orchestrate/pod.py | 50 +-
 .../parsers/orchestrate/runtimes/container.py | 9 +-
 jina/parsers/orchestrate/runtimes/remote.py | 10 +-
 jina/proto/docarray_v1/pb/jina_pb2.py | 147 ++--
 jina/proto/docarray_v1/pb/jina_pb2_grpc.py | 661 +++++++++++-------
 jina/proto/docarray_v1/pb2/jina_pb2.py | 450 +++++++-----
 jina/proto/docarray_v1/pb2/jina_pb2_grpc.py | 661 +++++++++++-------
 jina/proto/docarray_v2/pb/jina_pb2.py | 147 ++--
 jina/proto/docarray_v2/pb/jina_pb2_grpc.py | 661 +++++++++++-------
 jina/proto/docarray_v2/pb2/jina_pb2.py | 450 +++++++-----
 jina/proto/docarray_v2/pb2/jina_pb2_grpc.py | 661 +++++++++++-------
 jina/proto/serializer.py | 2 +-
 jina/resources/base-gateway/gateway.py | 3 +-
 .../project-template/deployment/client.py | 4 +-
 .../resources/project-template/flow/client.py | 4 +-
 .../flow/executor1/executor.py | 2 +-
 .../consensus/add_voter/pb/add_voter_pb2.py | 29 +-
 .../add_voter/pb/add_voter_pb2_grpc.py | 167 +++--
 .../consensus/add_voter/pb2/add_voter_pb2.py | 86 ++-
 .../add_voter/pb2/add_voter_pb2_grpc.py | 167 +++--
 jina/serve/executors/__init__.py | 8 +-
 jina/serve/executors/decorators.py | 3 +-
 jina/serve/executors/metas.py | 10 +-
 jina/serve/executors/run.py | 48 +-
 jina/serve/networking/__init__.py | 240 ++++---
 jina/serve/networking/connection_stub.py | 20 +-
 jina/serve/networking/replica_list.py | 26 +-
 jina/serve/runtimes/asyncio.py | 20 +-
 .../async_request_response_handling.py | 8 +-
 .../runtimes/gateway/composite/__init__.py | 1 +
 jina/serve/runtimes/gateway/gateway.py | 1 +
 .../runtimes/gateway/graph/topology_graph.py | 312 +++++----
 jina/serve/runtimes/gateway/grpc/__init__.py | 2 +-
 jina/serve/runtimes/gateway/http/__init__.py | 5 +-
 .../runtimes/gateway/http/fastapi/__init__.py | 1 +
 .../runtimes/gateway/http_fastapi_app.py | 93 +--
 .../gateway/load_balancer/__init__.py | 1 +
 jina/serve/runtimes/gateway/models.py | 11 +-
 .../runtimes/gateway/request_handling.py | 8 +-
 .../runtimes/gateway/websocket/__init__.py | 3 +-
 .../runtimes/gateway/websocket_fastapi_app.py | 49 +-
 jina/serve/runtimes/head/request_handling.py | 129 ++--
 jina/serve/runtimes/monitoring.py | 4 +-
 jina/serve/runtimes/servers/__init__.py | 49 +-
 jina/serve/runtimes/servers/composite.py | 19 +-
 jina/serve/runtimes/servers/grpc.py | 4 +-
 jina/serve/runtimes/servers/load_balancer.py | 5 +-
 jina/serve/runtimes/servers/websocket.py | 25 +-
 jina/serve/runtimes/worker/batch_queue.py | 11 +-
 .../runtimes/worker/http_sagemaker_app.py | 1 +
 .../serve/runtimes/worker/request_handling.py | 140 ++--
 jina/serve/stream/__init__.py | 8 +-
 jina/serve/stream/helper.py | 11 +-
 jina/types/request/data.py | 48 +-
 tests/conftest.py | 3 +-
 .../test_deployment_docker_compose.py | 61 +-
 .../test_concurrent_clients.py | 7 +-
 .../test_condition_behavior.py | 18 +-
 .../deployments/test_deployment.py | 7 +-
 .../test_distributed_replicas.py | 4 +-
 .../docarray_v2/docker/executor1/executor.py | 7 +-
 .../docarray_v2/docker/test_with_docker.py | 18 +-
 .../issues/github_6137/test_issue.py | 30 +-
 .../docarray_v2/sagemaker/test_embedding.py | 58 +-
 .../integration/docarray_v2/test_singleton.py | 251 +++++--
 tests/integration/docarray_v2/test_v2.py | 14 +-
 .../dynamic_batching/test_dynamic_batching.py | 14 +-
 .../test_floating_deployments.py | 68 +-
 .../gateway_clients/test_clients_gateways.py | 3 +-
 .../integration/hot_reload/test_hot_reload.py | 12 +-
 .../test_install_requirements.py | 5 +-
 .../issues/github_3124/test_cli_executor.py | 1 -
 .../github_5543/test_reentrant_flows.py | 7 +-
 tests/integration/monitoring/test_executor.py | 12 +-
 .../integration/monitoring/test_monitoring.py | 6 +-
 .../monitoring/test_request_size.py | 6 +-
 .../test_multiple_protocols_gateway.py | 4 +-
 .../network_failures/test_network_failures.py | 12 +-
 .../pods/container/gateway-runtime/runtime.py | 4 +-
 .../pods/container/head-runtime/runtime.py | 4 +-
 tests/integration/rr_cuda/test_rr_cuda.py | 15 +-
 .../runtimes/test_gateway_dry_run.py | 3 +-
 .../stateful_no_snapshot_exec/executor.py | 16 +-
 .../stateful_snapshot_exec/executor.py | 16 +-
 tests/integration/stateful/test_stateful.py | 101 ++-
 .../streaming/test_clients_streaming.py | 21 +-
 .../test_return_order/test_return_order.py | 12 +-
 tests/jinahub/app.py | 1 +
 tests/k8s/conftest.py | 2 +-
 tests/k8s/test_k8s_deployment.py | 18 +-
 tests/k8s/test_k8s_flow.py | 18 +-
 .../k8s/test_k8s_graceful_request_handling.py | 12 +-
 tests/k8s_otel/conftest.py | 3 +-
 .../unit/clients/python/test_client_errors.py | 43 +-
 tests/unit/clients/test_asyncio.py | 8 +-
 .../config/test_docker_compose_pod_config.py | 19 +-
 .../config/test_k8s_deployment_config.py | 303 ++++----
 .../flow/flow-async/test_asyncflow.py | 22 +-
 .../flow/flow-construct/test_flow.py | 8 +-
 .../flow-construct/test_flow_monitoring.py | 3 +-
 .../flow-construct/test_flow_multiprotocol.py | 4 +-
 .../flow-construct/test_flow_to_k8s_yaml.py | 40 +-
 .../flow-construct/test_flow_yaml_parser.py | 4 +-
 .../test_slow_executor_shutdown.py | 4 +-
 .../pods/container/test_container_pod.py | 3 +-
 .../unit/orchestrate/pods/test_pod_factory.py | 4 +-
 .../dynamic_batching/test_batch_queue.py | 17 +-
 tests/unit/serve/executors/test_executor.py | 18 +-
 tests/unit/serve/gateway/test_gateway.py | 144 ++--
 .../gateway/graph/test_topology_graph.py | 4 +-
 tests/unit/serve/runtimes/test_helper.py | 30 +-
 .../runtimes/worker/test_worker_runtime.py | 12 +-
 tests/unit/test_cli.py | 4 +-
 tests/unit/test_helper.py | 4 +
 145 files changed, 4658 insertions(+), 3180 deletions(-)

diff --git a/jina/__init__.py b/jina/__init__.py
index 0f1b6958627ae..152c50dd70fe7 100644
--- a/jina/__init__.py
+++ b/jina/__init__.py
@@ -35,7 +35,7 @@ def _ignore_google_warnings():
         'ignore',
         category=DeprecationWarning,
         message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
-        append=True
+        append=True,
     )


diff --git a/jina/clients/__init__.py b/jina/clients/__init__.py
index 3a934f2cec8fa..14c48b4de1789 100644
--- a/jina/clients/__init__.py
+++ b/jina/clients/__init__.py
@@ -1,4 +1,5 @@
 """Module wrapping the Client of Jina."""
+
 import argparse
 from typing import TYPE_CHECKING, List, Optional, Union, overload

diff --git a/jina/clients/base/__init__.py b/jina/clients/base/__init__.py
index a457bcdb74093..7fe60b15f74e7 100644
--- a/jina/clients/base/__init__.py
+++ b/jina/clients/base/__init__.py
@@ -1,4 +1,5 @@
 """Module containing the Base Client for Jina."""
+
 import abc
 import argparse
 import inspect
@@ -48,9 +49,11 @@ def __init__(
         os.unsetenv('https_proxy')
         self._inputs = None
         self._setup_instrumentation(
-            name=self.args.name
-            if hasattr(self.args, 'name')
-            else self.__class__.__name__,
+            name=(
+                self.args.name
+                if hasattr(self.args, 'name')
+                else self.__class__.__name__
+            ),
             tracing=self.args.tracing,
             traces_exporter_host=self.args.traces_exporter_host,
             traces_exporter_port=self.args.traces_exporter_port,
@@ -180,8 +183,7 @@ async def _get_results(
         on_error: Optional['CallbackFnType'] = None,
         on_always: Optional['CallbackFnType'] = None,
         **kwargs,
-    ):
-        ...
+    ): ...

     @abc.abstractmethod
     def _is_flow_ready(self, **kwargs) -> bool:
diff --git a/jina/clients/base/grpc.py b/jina/clients/base/grpc.py
index 8d789bd04fee5..204924a57f74d 100644
--- a/jina/clients/base/grpc.py
+++ b/jina/clients/base/grpc.py
@@ -142,7 +142,9 @@ async def _get_results(
                 compression=self.compression,
                 **kwargs,
             )
-            async for response in stream_rpc.stream_rpc_with_retry():
+            async for (
+                response
+            ) in stream_rpc.stream_rpc_with_retry():
                 yield response
         else:
             unary_rpc = UnaryRpc(
@@ -257,9 +259,7 @@ async def _get_streaming_results(
         req.header.exec_endpoint = on
         req.document_cls = inputs.__class__
         req.data.doc = inputs
-        async for response in self.stream_doc_endpoint(
-            request=req, timeout=timeout
-        ):
+        async for response in self.stream_doc_endpoint(request=req, timeout=timeout):
             yield return_type.from_protobuf(response.document)

diff --git a/jina/clients/base/retry.py b/jina/clients/base/retry.py
index f8502bd260189..0ad08375da690 100644
--- a/jina/clients/base/retry.py
+++ b/jina/clients/base/retry.py
@@ -33,13 +33,14 @@ def _raise_last_attempt(err, attempt):
     raise err


-def sync_wait_or_raise_err(attempt: int,
-                           err: Exception,
-                           max_attempts: float,
-                           backoff_multiplier: float,
-                           initial_backoff: float,
-                           max_backoff: float,
-                           ):
+def sync_wait_or_raise_err(
+    attempt: int,
+    err: Exception,
+    max_attempts: float,
+    backoff_multiplier: float,
+    initial_backoff: float,
+    max_backoff: float,
+):
     """
     Accepts retry parameters and the underlying. The error is raised if the max_attempts has been reached otherwise the
     method waits based on the backoff calculations.
@@ -53,16 +54,18 @@ def sync_wait_or_raise_err(attempt: int, if attempt == max_attempts: _raise_last_attempt(err, attempt) else: - time.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff)) + time.sleep( + _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff) + ) async def wait_or_raise_err( - attempt: int, - err: Exception, - max_attempts: float, - backoff_multiplier: float, - initial_backoff: float, - max_backoff: float, + attempt: int, + err: Exception, + max_attempts: float, + backoff_multiplier: float, + initial_backoff: float, + max_backoff: float, ): """ Accepts retry parameters and the underlying. The error is raised if the max_attempts has been reached otherwise the @@ -78,7 +81,9 @@ async def wait_or_raise_err( if attempt == max_attempts: _raise_last_attempt(err, attempt) else: - await asyncio.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff)) + await asyncio.sleep( + _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff) + ) def _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff): diff --git a/jina/clients/base/websocket.py b/jina/clients/base/websocket.py index 399a0b33a38ac..a8b868704bac0 100644 --- a/jina/clients/base/websocket.py +++ b/jina/clients/base/websocket.py @@ -1,4 +1,5 @@ """A module for the websockets-based Client for Jina.""" + import asyncio from contextlib import AsyncExitStack from typing import TYPE_CHECKING, Dict, Optional, Tuple @@ -131,9 +132,9 @@ async def _get_results( ) ) - request_buffer: Dict[ - str, asyncio.Future - ] = dict() # maps request_ids to futures (tasks) + request_buffer: Dict[str, asyncio.Future] = ( + dict() + ) # maps request_ids to futures (tasks) def _result_handler(result): return result diff --git a/jina/clients/mixin.py b/jina/clients/mixin.py index 32bdc093f4ebd..ec0c52049d200 100644 --- a/jina/clients/mixin.py +++ b/jina/clients/mixin.py @@ -401,6 +401,7 @@ async def _get_results(*args, **kwargs): inferred_return_type = return_type if docarray_v2: from docarray import DocList + if not issubclass(return_type, DocList): is_singleton = True inferred_return_type = DocList[return_type] @@ -530,6 +531,7 @@ async def post( is_singleton = False if docarray_v2: from docarray import DocList + if issubclass(return_type, DocList): result.document_array_cls = return_type else: diff --git a/jina/clients/request/helper.py b/jina/clients/request/helper.py index 338dea96551c2..9976c470216fe 100644 --- a/jina/clients/request/helper.py +++ b/jina/clients/request/helper.py @@ -1,4 +1,5 @@ """Module for helper functions for clients.""" + from typing import Optional, Tuple from jina._docarray import Document, DocumentArray, docarray_v2 diff --git a/jina/excepts.py b/jina/excepts.py index a5b7bb403150e..9385e2f724a03 100644 --- a/jina/excepts.py +++ b/jina/excepts.py @@ -1,4 +1,5 @@ """This modules defines all kinds of exceptions raised in Jina.""" + from typing import List, Optional, Set, Union import grpc.aio diff --git a/jina/exporter.py b/jina/exporter.py index 328e1758a29b0..5bc4e9575d00d 100644 --- a/jina/exporter.py +++ b/jina/exporter.py @@ -22,7 +22,9 @@ def export_kubernetes(args): output_base_path=args.outpath, k8s_namespace=args.k8s_namespace ) else: - raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes') + raise NotImplementedError( + f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes' + ) def export_docker_compose(args): @@ -40,7 +42,9 @@ def 
export_docker_compose(args): output_path=args.outpath, network_name=args.network_name ) else: - raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose') + raise NotImplementedError( + f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose' + ) def export_flowchart(args): @@ -59,6 +63,7 @@ def export_schema(args): :param args: args from CLI """ from jina import __version__ + if args.yaml_path: dump_api = api_to_dict() for yp in args.yaml_path: diff --git a/jina/helper.py b/jina/helper.py index 6e1f0f2ea51ed..b5bcd9759b4b8 100644 --- a/jina/helper.py +++ b/jina/helper.py @@ -1666,4 +1666,5 @@ def _telemetry(): def is_generator(func): import inspect + return inspect.isgeneratorfunction(func) or inspect.isasyncgenfunction(func) diff --git a/jina/importer.py b/jina/importer.py index 60402365335c6..e13d93f355ae5 100644 --- a/jina/importer.py +++ b/jina/importer.py @@ -39,18 +39,20 @@ def __init__( def __enter__(self): return self - + def _check_v(self, v, missing_module): if ( - v.strip() - and not v.startswith('#') - and v.startswith(missing_module) - and ':' in v - ): + v.strip() + and not v.startswith('#') + and v.startswith(missing_module) + and ':' in v + ): return True def _find_missing_module_in_extra_req(self, missing_module): - with open(os.path.join(__resources_path__, 'extra-requirements.txt'), encoding='utf-8') as fp: + with open( + os.path.join(__resources_path__, 'extra-requirements.txt'), encoding='utf-8' + ) as fp: for v in fp: if self._check_v(v, missing_module): missing_module, install_tags = v.split(':') @@ -63,7 +65,6 @@ def _find_missing_module(self, exc_val): missing_module = self._find_missing_module_in_extra_req(missing_module) return missing_module - def _err_msg(self, exc_val, missing_module): if self._tags: from jina.helper import colored @@ -84,7 +85,7 @@ def _err_msg(self, exc_val, missing_module): else: err_msg = f'{exc_val.msg}' return err_msg - + def _log_critical(self, err_msg): if self._verbose and self._logger: self._logger.critical(err_msg) @@ -95,7 +96,7 @@ def _log_warning(self, err_msg): if self._verbose and self._logger: self._logger.warning(err_msg) if self._help_text: - self._logger.info(self._help_text) + self._logger.info(self._help_text) def _raise_or_supress(self, err_msg, exc_val): if self._verbose and not self._logger: @@ -107,14 +108,12 @@ def _raise_or_supress(self, err_msg, exc_val): self._log_warning(err_msg) return True # suppress the error - def __exit__(self, exc_type, exc_val, traceback): if exc_type != ModuleNotFoundError: return missing_module = self._find_missing_module(exc_val) err_msg = self._err_msg(exc_val, missing_module) return self._raise_or_supress(err_msg, exc_val) - def _path_import(absolute_path: str): diff --git a/jina/jaml/__init__.py b/jina/jaml/__init__.py index a449066273520..040136d434394 100644 --- a/jina/jaml/__init__.py +++ b/jina/jaml/__init__.py @@ -680,9 +680,7 @@ def load_config( :return: :class:`JAMLCompatible` object """ if runtime_args: - kwargs[ - 'runtimes_args' - ] = ( + kwargs['runtimes_args'] = ( dict() ) # when we have runtime args it is needed to have an empty runtime args session in the yam config @@ -741,9 +739,11 @@ def _delitem( _extra_search_paths = extra_search_paths or [] load_py_modules( no_tag_yml, - extra_search_paths=(_extra_search_paths + [os.path.dirname(s_path)]) - if s_path - else _extra_search_paths, + extra_search_paths=( + (_extra_search_paths + [os.path.dirname(s_path)]) + if s_path + else _extra_search_paths + 
), ) from jina.enums import DeploymentRoleType diff --git a/jina/orchestrate/deployments/__init__.py b/jina/orchestrate/deployments/__init__.py index 580370fd6373f..7bbfa82468abb 100644 --- a/jina/orchestrate/deployments/__init__.py +++ b/jina/orchestrate/deployments/__init__.py @@ -605,9 +605,11 @@ def _get_connection_list_for_flow(self) -> List[str]: # there is no head, add the worker connection information instead ports = self.ports hosts = [ - __docker_host__ - if host_is_local(host) and in_docker() and self._is_docker - else host + ( + __docker_host__ + if host_is_local(host) and in_docker() and self._is_docker + else host + ) for host in self.hosts ] return [ @@ -1133,9 +1135,11 @@ def start(self) -> 'Deployment': deployment_args=self.args, args=self.pod_args['pods'][shard_id], head_pod=self.head_pod, - name=f'{self.name}-replica-set-{shard_id}' - if num_shards > 1 - else f'{self.name}-replica-set', + name=( + f'{self.name}-replica-set-{shard_id}' + if num_shards > 1 + else f'{self.name}-replica-set' + ), ) self.enter_context(self.shards[shard_id]) diff --git a/jina/orchestrate/deployments/config/docker_compose.py b/jina/orchestrate/deployments/config/docker_compose.py index 326a762958485..145e157254ee6 100644 --- a/jina/orchestrate/deployments/config/docker_compose.py +++ b/jina/orchestrate/deployments/config/docker_compose.py @@ -275,13 +275,13 @@ def __init__( shard_id=i, common_args=self.args, service_args=args, - pod_type=PodRoleType.WORKER - if name != 'gateway' - else PodRoleType.GATEWAY, + pod_type=( + PodRoleType.WORKER if name != 'gateway' else PodRoleType.GATEWAY + ), jina_deployment_name=self.name, - deployments_addresses=self.deployments_addresses - if name == 'gateway' - else None, + deployments_addresses=( + self.deployments_addresses if name == 'gateway' else None + ), ) ) @@ -342,9 +342,7 @@ def _get_services_args(self, args): uses_before_cargs.pod_role = PodRoleType.WORKER uses_before_cargs.polling = None parsed_args['uses_before_service'] = uses_before_cargs - parsed_args[ - 'head_service' - ].uses_before_address = ( + parsed_args['head_service'].uses_before_address = ( f'{to_compatible_name(uses_before_cargs.name)}:{uses_before_cargs.port}' ) if uses_after and shards > 1: @@ -366,9 +364,7 @@ def _get_services_args(self, args): uses_after_cargs.pod_role = PodRoleType.WORKER uses_after_cargs.polling = None parsed_args['uses_after_service'] = uses_after_cargs - parsed_args[ - 'head_service' - ].uses_after_address = ( + parsed_args['head_service'].uses_after_address = ( f'{to_compatible_name(uses_after_cargs.name)}:{uses_after_cargs.port}' ) diff --git a/jina/orchestrate/deployments/config/helper.py b/jina/orchestrate/deployments/config/helper.py index 23535d0794e0f..472c8c9ab9eb8 100644 --- a/jina/orchestrate/deployments/config/helper.py +++ b/jina/orchestrate/deployments/config/helper.py @@ -10,7 +10,7 @@ __default_grpc_gateway__, __default_http_gateway__, __default_websocket_gateway__, - __dynamic_base_gateway_hubble__ + __dynamic_base_gateway_hubble__, ) from jina.enums import PodRoleType @@ -22,21 +22,19 @@ def resolve_image_name(uses: Optional[str]): :return: image name equivalent """ - if uses in [__default_http_gateway__, - __default_websocket_gateway__, - __default_grpc_gateway__, - __default_composite_gateway__]: - image_name = os.getenv( - 'JINA_GATEWAY_IMAGE', None - ) + if uses in [ + __default_http_gateway__, + __default_websocket_gateway__, + __default_grpc_gateway__, + __default_composite_gateway__, + ]: + image_name = os.getenv('JINA_GATEWAY_IMAGE', 
None) if image_name is None: image_name = get_image_name(__dynamic_base_gateway_hubble__) elif uses is not None and uses != __default_executor__: image_name = get_image_name(uses) else: - image_name = os.getenv( - 'JINA_GATEWAY_IMAGE', None - ) + image_name = os.getenv('JINA_GATEWAY_IMAGE', None) if image_name is None: image_name = get_image_name(__dynamic_base_gateway_hubble__) @@ -88,6 +86,7 @@ def get_base_executor_version(): try: from jina import __version__ + url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags' result: Dict = requests.get(url, params={'name': __version__}).json() if result.get('count', 0) > 0: @@ -158,16 +157,16 @@ def validate_uses(uses: str): # default gateway class or default executor => deployment uses base container and sets uses in command # container images => deployment uses the specified container image and uses is defined by container if ( - uses is None - or uses - in [ - __default_http_gateway__, - __default_websocket_gateway__, - __default_grpc_gateway__, - __default_composite_gateway__, - __default_executor__, - ] - or uses.startswith('docker://') + uses is None + or uses + in [ + __default_http_gateway__, + __default_websocket_gateway__, + __default_grpc_gateway__, + __default_composite_gateway__, + __default_executor__, + ] + or uses.startswith('docker://') ): return True diff --git a/jina/orchestrate/deployments/config/k8s.py b/jina/orchestrate/deployments/config/k8s.py index b17329d3e7d0c..720766d3de189 100644 --- a/jina/orchestrate/deployments/config/k8s.py +++ b/jina/orchestrate/deployments/config/k8s.py @@ -30,15 +30,15 @@ class K8sDeploymentConfig: class _K8sDeployment: def __init__( - self, - name: str, - version: str, - pod_type: PodRoleType, - jina_deployment_name: str, - shard_id: Optional[int], - common_args: Union['Namespace', Dict], - deployment_args: Union['Namespace', Dict], - k8s_namespace: str, + self, + name: str, + version: str, + pod_type: PodRoleType, + jina_deployment_name: str, + shard_id: Optional[int], + common_args: Union['Namespace', Dict], + deployment_args: Union['Namespace', Dict], + k8s_namespace: str, ): self.name = name self.dns_name = to_compatible_name(name) @@ -52,7 +52,7 @@ def __init__( self.k8s_namespace = k8s_namespace def get_gateway_yamls( - self, + self, ) -> List[Dict]: cargs = copy.copy(self.deployment_args) from jina.helper import ArgNamespace @@ -112,9 +112,7 @@ def _get_container_args(self, cargs, pod_type): cargs, uses_metas, uses_with, pod_type ) - def get_runtime_yamls( - self - ) -> List[Dict]: + def get_runtime_yamls(self) -> List[Dict]: cargs = copy.copy(self.deployment_args) image_name = resolve_image_name(cargs.uses) @@ -198,9 +196,9 @@ def get_runtime_yamls( ) def __init__( - self, - args: Union['Namespace', Dict], - k8s_namespace: Optional[str] = None, + self, + args: Union['Namespace', Dict], + k8s_namespace: Optional[str] = None, ): # External Deployments should be ignored in a K8s based Flow assert not (hasattr(args, 'external') and args.external) @@ -240,17 +238,15 @@ def __init__( shard_id=i, common_args=self.args, deployment_args=args, - pod_type=PodRoleType.WORKER - if name != 'gateway' - else PodRoleType.GATEWAY, + pod_type=( + PodRoleType.WORKER if name != 'gateway' else PodRoleType.GATEWAY + ), jina_deployment_name=self.name, k8s_namespace=self.k8s_namespace, ) ) - def _get_deployment_args( - self, args - ): + def _get_deployment_args(self, args): parsed_args = { 'head_deployment': None, 'deployments': [], @@ -267,9 +263,9 @@ def _get_deployment_args( ) 
parsed_args['head_deployment'].gpus = None parsed_args['head_deployment'].port = GrpcConnectionPool.K8S_PORT - parsed_args[ - 'head_deployment' - ].port_monitoring = GrpcConnectionPool.K8S_PORT_MONITORING + parsed_args['head_deployment'].port_monitoring = ( + GrpcConnectionPool.K8S_PORT_MONITORING + ) parsed_args['head_deployment'].uses = None parsed_args['head_deployment'].uses_metas = None parsed_args['head_deployment'].uses_with = None @@ -283,24 +279,20 @@ def _get_deployment_args( if shards > 1 else f'{to_compatible_name(self.name)}' ) - connection_list[ - str(i) - ] = f'{name}.{self.k8s_namespace}.svc:{GrpcConnectionPool.K8S_PORT}' + connection_list[str(i)] = ( + f'{name}.{self.k8s_namespace}.svc:{GrpcConnectionPool.K8S_PORT}' + ) parsed_args['head_deployment'].connection_list = json.dumps( connection_list ) if uses_before: - parsed_args[ - 'head_deployment' - ].uses_before_address = ( + parsed_args['head_deployment'].uses_before_address = ( f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_BEFORE}' ) if uses_after: - parsed_args[ - 'head_deployment' - ].uses_after_address = ( + parsed_args['head_deployment'].uses_after_address = ( f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_AFTER}' ) @@ -310,7 +302,9 @@ def _get_deployment_args( cargs.shard_id = i cargs.uses_before = None cargs.uses_after = None - cargs.port = [GrpcConnectionPool.K8S_PORT + i for i in range(len(cargs.protocol))] + cargs.port = [ + GrpcConnectionPool.K8S_PORT + i for i in range(len(cargs.protocol)) + ] cargs.port_monitoring = GrpcConnectionPool.K8S_PORT_MONITORING cargs.uses_before_address = None @@ -324,7 +318,7 @@ def _get_deployment_args( return parsed_args def to_kubernetes_yaml( - self, + self, ) -> List[Tuple[str, List[Dict]]]: """ Return a list of dictionary configurations. 
One for each deployment in this Deployment diff --git a/jina/orchestrate/deployments/config/k8slib/kubernetes_deployment.py b/jina/orchestrate/deployments/config/k8slib/kubernetes_deployment.py index 315c4f099afdc..41017d13856e0 100644 --- a/jina/orchestrate/deployments/config/k8slib/kubernetes_deployment.py +++ b/jina/orchestrate/deployments/config/k8slib/kubernetes_deployment.py @@ -11,30 +11,30 @@ def get_template_yamls( - name: str, - namespace: str, - image_name: str, - container_cmd: str, - container_args: str, - replicas: int, - pull_policy: str, - jina_deployment_name: str, - pod_type: str, - shard_id: Optional[int] = None, - env: Optional[Dict] = None, - env_from_secret: Optional[Dict] = None, - image_pull_secrets: Optional[List] = None, - gpus: Optional[Union[int, str]] = None, - image_name_uses_before: Optional[str] = None, - image_name_uses_after: Optional[str] = None, - container_cmd_uses_before: Optional[str] = None, - container_cmd_uses_after: Optional[str] = None, - container_args_uses_before: Optional[str] = None, - container_args_uses_after: Optional[str] = None, - monitoring: bool = False, - protocol: Optional[Union[str, List[str]]] = None, - volumes: Optional[List[str]] = None, - timeout_ready: int = 600000, + name: str, + namespace: str, + image_name: str, + container_cmd: str, + container_args: str, + replicas: int, + pull_policy: str, + jina_deployment_name: str, + pod_type: str, + shard_id: Optional[int] = None, + env: Optional[Dict] = None, + env_from_secret: Optional[Dict] = None, + image_pull_secrets: Optional[List] = None, + gpus: Optional[Union[int, str]] = None, + image_name_uses_before: Optional[str] = None, + image_name_uses_after: Optional[str] = None, + container_cmd_uses_before: Optional[str] = None, + container_cmd_uses_after: Optional[str] = None, + container_args_uses_before: Optional[str] = None, + container_args_uses_after: Optional[str] = None, + monitoring: bool = False, + protocol: Optional[Union[str, List[str]]] = None, + volumes: Optional[List[str]] = None, + timeout_ready: int = 600000, ) -> List[Dict]: """Get the yaml description of a service on Kubernetes @@ -68,7 +68,11 @@ def get_template_yamls( """ # we can always assume the ports are the same for all executors since they run on different k8s pods # port expose can be defined by the user - port = [GrpcConnectionPool.K8S_PORT + i for i in range(len(protocol))] if isinstance(protocol, list) else GrpcConnectionPool.K8S_PORT # TODO: This cannot happen + port = ( + [GrpcConnectionPool.K8S_PORT + i for i in range(len(protocol))] + if isinstance(protocol, list) + else GrpcConnectionPool.K8S_PORT + ) # TODO: This cannot happen port_monitoring = GrpcConnectionPool.K8S_PORT_MONITORING # we cast port to list of ports and protocol to list of protocols @@ -215,4 +219,3 @@ def get_template_yamls( yamls.append(service_monitor_yaml) return yamls - diff --git a/jina/orchestrate/deployments/config/k8slib/kubernetes_tools.py b/jina/orchestrate/deployments/config/k8slib/kubernetes_tools.py index 6885bd0c7617e..0b07fdeb1b3c5 100644 --- a/jina/orchestrate/deployments/config/k8slib/kubernetes_tools.py +++ b/jina/orchestrate/deployments/config/k8slib/kubernetes_tools.py @@ -87,7 +87,10 @@ def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict: def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict: for k, v in params['env_from_secret'].items(): - env_var = {'name': k, 'valueFrom': {'secretKeyRef': {'name': v['name'], 'key': v['key']}}} + env_var = { + 'name': k, + 
'valueFrom': {'secretKeyRef': {'name': v['name'], 'key': v['key']}}, + } deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var) @@ -97,7 +100,5 @@ def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict: def _get_deployment_with_image_pull_secrets(deployment: Dict, params: Dict) -> Dict: image_pull_secrets = params['image_pull_secrets'] image_pull_secrets_dict = [{'name': secret} for secret in image_pull_secrets] - deployment['spec']['template']['spec'][ - 'imagePullSecrets' - ] = image_pull_secrets_dict + deployment['spec']['template']['spec']['imagePullSecrets'] = image_pull_secrets_dict return deployment diff --git a/jina/orchestrate/deployments/install_requirements_helper.py b/jina/orchestrate/deployments/install_requirements_helper.py index 63a5b9a80962a..157ab5b22ac05 100644 --- a/jina/orchestrate/deployments/install_requirements_helper.py +++ b/jina/orchestrate/deployments/install_requirements_helper.py @@ -137,9 +137,7 @@ def _get_install_options(requirements_file: 'Path', excludes: Tuple[str] = ('jin return install_reqs, install_options -def _is_requirements_installed( - requirements_file: 'Path' -) -> bool: +def _is_requirements_installed(requirements_file: 'Path') -> bool: """Return True if requirements.txt is installed locally :param requirements_file: the requirements.txt file :return: True or False if not satisfied @@ -160,6 +158,7 @@ def _is_requirements_installed( pkg_resources.require('\n'.join(install_reqs)) except (DistributionNotFound, VersionConflict, RequirementParseError) as ex: import warnings + warnings.warn(repr(ex)) return isinstance(ex, VersionConflict) return True @@ -172,6 +171,7 @@ def _install_requirements(requirements_file: 'Path', timeout: int = 1000): """ import subprocess import sys + if _is_requirements_installed(requirements_file): return @@ -191,9 +191,7 @@ def _install_requirements(requirements_file: 'Path', timeout: int = 1000): ) -def install_package_dependencies( - pkg_path: Optional['Path'] -) -> None: +def install_package_dependencies(pkg_path: Optional['Path']) -> None: """ :param pkg_path: package path @@ -205,15 +203,19 @@ def install_package_dependencies( if requirements_file.exists(): _install_requirements(requirements_file) + def _get_package_path_from_uses(uses: str) -> Optional['Path']: if isinstance(uses, str) and os.path.exists(uses): from pathlib import Path + return Path(os.path.dirname(os.path.abspath(uses))) else: from hubble.executor.helper import is_valid_huburi + if not is_valid_huburi(uses): from jina.logging.predefined import default_logger - + default_logger.warning( - f'Error getting the directory name from {uses}. `--install-requirements` option is only valid when `uses` is a configuration file.') + f'Error getting the directory name from {uses}. `--install-requirements` option is only valid when `uses` is a configuration file.' 
+ ) return None diff --git a/jina/orchestrate/flow/base.py b/jina/orchestrate/flow/base.py index 3f93448c0015a..5c3622224ff78 100644 --- a/jina/orchestrate/flow/base.py +++ b/jina/orchestrate/flow/base.py @@ -661,9 +661,11 @@ def _add_gateway( deployment_role=DeploymentRoleType.GATEWAY, expose_endpoints=json.dumps(self._endpoints_mapping), env=self.env, - log_config=kwargs.get('log_config') - if 'log_config' in kwargs - else self.args.log_config, + log_config=( + kwargs.get('log_config') + if 'log_config' in kwargs + else self.args.log_config + ), ) ) @@ -1246,9 +1248,11 @@ def add( dict( name=deployment_name, deployment_role=deployment_role, - log_config=kwargs.get('log_config') - if 'log_config' in kwargs - else self.args.log_config, + log_config=( + kwargs.get('log_config') + if 'log_config' in kwargs + else self.args.log_config + ), ) ) parser = set_deployment_parser() @@ -1741,9 +1745,11 @@ def build(self, copy_flow: bool = False, **kwargs) -> 'Flow': # but not those inspect related node if op_flow.args.inspect.is_keep: deployment.needs = set( - ep - if deployment.role.is_inspect - else op_flow._inspect_deployments.get(ep, ep) + ( + ep + if deployment.role.is_inspect + else op_flow._inspect_deployments.get(ep, ep) + ) for ep in deployment.needs ) else: diff --git a/jina/orchestrate/pods/__init__.py b/jina/orchestrate/pods/__init__.py index d6847187c1ff7..616f264a10176 100644 --- a/jina/orchestrate/pods/__init__.py +++ b/jina/orchestrate/pods/__init__.py @@ -32,7 +32,12 @@ class BasePod(ABC): def __init__(self, args: 'argparse.Namespace'): self.args = args if self.args.pod_role == PodRoleType.GATEWAY: - _update_gateway_args(self.args, gateway_load_balancer=getattr(self.args, 'gateway_load_balancer', False)) + _update_gateway_args( + self.args, + gateway_load_balancer=getattr( + self.args, 'gateway_load_balancer', False + ), + ) self.args.parallel = getattr(self.args, 'shards', 1) self.name = self.args.name or self.__class__.__name__ self.logger = JinaLogger(self.name, **vars(self.args)) @@ -71,7 +76,7 @@ def close(self) -> None: self.logger.debug(f'terminate') self._terminate() if not self.is_shutdown.wait( - timeout=self._timeout_ctrl if not __windows__ else 1.0 + timeout=self._timeout_ctrl if not __windows__ else 1.0 ): if not __windows__: raise Exception( @@ -83,10 +88,12 @@ def close(self) -> None: ) except Exception as ex: self.logger.error( - f'{ex!r} during {self.close!r}' - + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r} during {self.close!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) else: @@ -95,7 +102,9 @@ def close(self) -> None: f'{"shutdown is already set" if self.is_shutdown.is_set() else "Runtime was never started"}. Runtime will end gracefully on its own' ) if not self.is_shutdown.is_set(): - self.is_signal_handlers_installed.wait(timeout=self._timeout_ctrl if not __windows__ else 1.0) # waiting for is_signal_handlers_installed will make sure signal handlers are installed + self.is_signal_handlers_installed.wait( + timeout=self._timeout_ctrl if not __windows__ else 1.0 + ) # waiting for is_signal_handlers_installed will make sure signal handlers are installed self._terminate() self.is_shutdown.set() self.logger.debug(__stop_msg__) @@ -115,6 +124,7 @@ def _wait_for_ready_or_shutdown(self, timeout: Optional[float]): .. 
# noqa: DAR201 """ from jina.serve.runtimes.servers import BaseServer + return BaseServer.wait_for_ready_or_shutdown( timeout=timeout, ready_or_shutdown_event=self.ready_or_shutdown.event, @@ -187,7 +197,9 @@ async def async_wait_start_success(self): check_protocol = getattr(self.args, 'protocol', ["grpc"])[0] async def check_readiness_server(): - self.logger.debug(f'Checking readiness to {self.runtime_ctrl_address} with protocol {check_protocol}') + self.logger.debug( + f'Checking readiness to {self.runtime_ctrl_address} with protocol {check_protocol}' + ) ready = await BaseServer.async_is_ready( ctrl_address=self.runtime_ctrl_address, timeout=_timeout, @@ -196,21 +208,23 @@ async def check_readiness_server(): # Executor does not have protocol yet ) if ready: - self.logger.debug(f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is ready') + self.logger.debug( + f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is ready' + ) else: - self.logger.debug(f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is not yet ready') + self.logger.debug( + f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is not yet ready' + ) return ready while timeout_ns is None or time.time_ns() - now < timeout_ns: if ( - self.ready_or_shutdown.event.is_set() - and ( # submit the health check to the pod, if it is + self.ready_or_shutdown.event.is_set() + and ( # submit the health check to the pod, if it is self.is_shutdown.is_set() # a worker and not shutdown or not self.args.pod_role == PodRoleType.WORKER - or ( - await check_readiness_server() - ) - ) + or (await check_readiness_server()) + ) ): self._check_failed_to_start() self.logger.debug(__ready_msg__) @@ -235,8 +249,7 @@ def start(self): ... @abstractmethod - def _terminate(self): - ... + def _terminate(self): ... 
@abstractmethod def join(self, *args, **kwargs): diff --git a/jina/orchestrate/pods/container.py b/jina/orchestrate/pods/container.py index 283611c99a1c4..c1808542d3aff 100644 --- a/jina/orchestrate/pods/container.py +++ b/jina/orchestrate/pods/container.py @@ -28,12 +28,12 @@ def _docker_run( - client: 'DockerClient', - args: 'argparse.Namespace', - container_name: str, - envs: Dict, - net_mode: Optional[str], - logger: 'JinaLogger', + client: 'DockerClient', + args: 'argparse.Namespace', + container_name: str, + envs: Dict, + net_mode: Optional[str], + logger: 'JinaLogger', ): # important to notice, that client is not assigned as instance member to avoid potential # heavy copy into new process memory space @@ -114,7 +114,7 @@ def _docker_run( _volumes = {} if not getattr(args, 'disable_auto_volume', None) and not getattr( - args, 'volumes', None + args, 'volumes', None ): ( generated_volumes, @@ -175,16 +175,16 @@ def _docker_run( def run( - args: 'argparse.Namespace', - name: str, - container_name: str, - net_mode: Optional[str], - runtime_ctrl_address: str, - envs: Dict, - is_started: Union['multiprocessing.Event', 'threading.Event'], - is_shutdown: Union['multiprocessing.Event', 'threading.Event'], - is_ready: Union['multiprocessing.Event', 'threading.Event'], - is_signal_handlers_installed: Union['multiprocessing.Event', 'threading.Event'], + args: 'argparse.Namespace', + name: str, + container_name: str, + net_mode: Optional[str], + runtime_ctrl_address: str, + envs: Dict, + is_started: Union['multiprocessing.Event', 'threading.Event'], + is_shutdown: Union['multiprocessing.Event', 'threading.Event'], + is_ready: Union['multiprocessing.Event', 'threading.Event'], + is_signal_handlers_installed: Union['multiprocessing.Event', 'threading.Event'], ): """Method to be run in a process that stream logs from a Container @@ -235,9 +235,9 @@ def _set_cancel(*args, **kwargs): ) else: with ImportExtensions( - required=True, - logger=logger, - help_text='''If you see a 'DLL load failed' error, please reinstall `pywin32`. + required=True, + logger=logger, + help_text='''If you see a 'DLL load failed' error, please reinstall `pywin32`. 
If you're using conda, please use the command `conda install -c anaconda pywin32`''', ): import win32api @@ -260,8 +260,10 @@ def _set_cancel(*args, **kwargs): def _is_ready(): from jina.serve.runtimes.servers import BaseServer + return BaseServer.is_ready( - ctrl_address=runtime_ctrl_address, protocol=getattr(args, 'protocol', ["grpc"])[0] + ctrl_address=runtime_ctrl_address, + protocol=getattr(args, 'protocol', ["grpc"])[0], ) def _is_container_alive(container) -> bool: @@ -274,11 +276,7 @@ def _is_container_alive(container) -> bool: return True async def _check_readiness(container): - while ( - _is_container_alive(container) - and not _is_ready() - and not cancel - ): + while _is_container_alive(container) and not _is_ready() and not cancel: await asyncio.sleep(0.1) if _is_container_alive(container): is_started.set() @@ -288,11 +286,7 @@ async def _check_readiness(container): async def _stream_starting_logs(container): for line in container.logs(stream=True): - if ( - not is_started.is_set() - and not fail_to_start - and not cancel - ): + if not is_started.is_set() and not fail_to_start and not cancel: await asyncio.sleep(0.01) msg = line.decode().rstrip() # type: str logger.debug(re.sub(r'\u001b\[.*?[@-~]', '', msg)) @@ -321,9 +315,9 @@ class ContainerPod(BasePod): def __init__(self, args: 'argparse.Namespace'): super().__init__(args) if ( - self.args.docker_kwargs - and 'extra_hosts' in self.args.docker_kwargs - and __docker_host__ in self.args.docker_kwargs['extra_hosts'] + self.args.docker_kwargs + and 'extra_hosts' in self.args.docker_kwargs + and __docker_host__ in self.args.docker_kwargs['extra_hosts'] ): self.args.docker_kwargs.pop('extra_hosts') self._net_mode = None @@ -339,9 +333,9 @@ def _get_control_address(self): network = get_docker_network(client) if ( - self.args.docker_kwargs - and 'extra_hosts' in self.args.docker_kwargs - and __docker_host__ in self.args.docker_kwargs['extra_hosts'] + self.args.docker_kwargs + and 'extra_hosts' in self.args.docker_kwargs + and __docker_host__ in self.args.docker_kwargs['extra_hosts'] ): ctrl_host = __docker_host__ elif network: @@ -372,7 +366,10 @@ def _get_network_for_dind_linux(self, client: 'DockerClient', ctrl_address: str) # Related to potential docker-in-docker communication. If `Runtime` lives already inside a container. # it will need to communicate using the `bridge` network. # In WSL, we need to set ports explicitly - net_mode, runtime_ctrl_address = getattr(self.args, 'force_network_mode', DockerNetworkMode.AUTO), ctrl_address + net_mode, runtime_ctrl_address = ( + getattr(self.args, 'force_network_mode', DockerNetworkMode.AUTO), + ctrl_address, + ) if sys.platform in ('linux', 'linux2') and 'microsoft' not in uname().release: if net_mode == DockerNetworkMode.AUTO: net_mode = DockerNetworkMode.HOST diff --git a/jina/orchestrate/pods/factory.py b/jina/orchestrate/pods/factory.py index c2148c98a2920..f558947ca7bde 100644 --- a/jina/orchestrate/pods/factory.py +++ b/jina/orchestrate/pods/factory.py @@ -19,7 +19,9 @@ class PodFactory: """ @staticmethod - def build_pod(args: 'Namespace', gateway_load_balancer: bool = False) -> Type['BasePod']: + def build_pod( + args: 'Namespace', gateway_load_balancer: bool = False + ) -> Type['BasePod']: """Build an implementation of a `BasePod` interface :param args: deployment arguments parsed from the CLI. 
diff --git a/jina/parsers/__init__.py b/jina/parsers/__init__.py index 83eba4b52eb44..ddcfc347491b9 100644 --- a/jina/parsers/__init__.py +++ b/jina/parsers/__init__.py @@ -24,12 +24,11 @@ def set_pod_parser(parser=None, default_name=None): from jina.parsers.orchestrate.base import mixin_scalable_deployment_parser from jina.parsers.orchestrate.pod import mixin_pod_parser - from jina.parsers.orchestrate.runtimes.container import \ - mixin_container_runtime_parser - from jina.parsers.orchestrate.runtimes.remote import \ - mixin_remote_runtime_parser - from jina.parsers.orchestrate.runtimes.worker import \ - mixin_worker_runtime_parser + from jina.parsers.orchestrate.runtimes.container import ( + mixin_container_runtime_parser, + ) + from jina.parsers.orchestrate.runtimes.remote import mixin_remote_runtime_parser + from jina.parsers.orchestrate.runtimes.worker import mixin_worker_runtime_parser mixin_scalable_deployment_parser(parser, default_name=default_name) mixin_worker_runtime_parser(parser) @@ -55,8 +54,7 @@ def set_deployment_parser(parser=None): set_pod_parser(parser, default_name='executor') - from jina.parsers.orchestrate.deployment import \ - mixin_base_deployment_parser + from jina.parsers.orchestrate.deployment import mixin_base_deployment_parser mixin_base_deployment_parser(parser) @@ -139,10 +137,14 @@ def set_client_cli_parser(parser=None): parser = set_base_parser() - from jina.parsers.client import (mixin_client_features_parser, - mixin_client_protocol_parser) + from jina.parsers.client import ( + mixin_client_features_parser, + mixin_client_protocol_parser, + ) from jina.parsers.orchestrate.runtimes.remote import ( - mixin_client_gateway_parser, mixin_prefetch_parser) + mixin_client_gateway_parser, + mixin_prefetch_parser, + ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) diff --git a/jina/parsers/base.py b/jina/parsers/base.py index d6fc43f348bfe..31653575b88e0 100644 --- a/jina/parsers/base.py +++ b/jina/parsers/base.py @@ -1,4 +1,5 @@ """Module containing the base parser for arguments of Jina.""" + import argparse from jina.parsers.helper import _chf diff --git a/jina/parsers/create.py b/jina/parsers/create.py index 95054c208181f..f5f6db45302cc 100644 --- a/jina/parsers/create.py +++ b/jina/parsers/create.py @@ -15,8 +15,11 @@ def set_new_project_parser(parser=None): parser.add_argument( 'name', type=str, help='The name of the project', default='hello-jina' ) - + parser.add_argument( - '--type', type=str, help='The type of project to be created (either flow or deployment)', default='flow' + '--type', + type=str, + help='The type of project to be created (either flow or deployment)', + default='flow', ) return parser diff --git a/jina/parsers/export.py b/jina/parsers/export.py index 1378bb35c0ad8..1dede8be82ac9 100644 --- a/jina/parsers/export.py +++ b/jina/parsers/export.py @@ -61,7 +61,10 @@ def mixin_base_io_parser(parser): """ parser.add_argument( - 'config_path', type=str, metavar='INPUT', help='The input file path of a Flow or Deployment YAML ' + 'config_path', + type=str, + metavar='INPUT', + help='The input file path of a Flow or Deployment YAML ', ) parser.add_argument( 'outpath', diff --git a/jina/parsers/helper.py b/jina/parsers/helper.py index bbc19029fb8e0..7b5105ce1e4de 100644 --- a/jina/parsers/helper.py +++ b/jina/parsers/helper.py @@ -262,7 +262,7 @@ def _para_reformat(self, text, width): return lines -def _get_gateway_class(protocol, works_as_load_balancer = False): +def _get_gateway_class(protocol, 
works_as_load_balancer=False): from jina.serve.runtimes.gateway.grpc import GRPCGateway from jina.serve.runtimes.gateway.http import HTTPGateway from jina.serve.runtimes.gateway.websocket import WebSocketGateway @@ -274,6 +274,7 @@ def _get_gateway_class(protocol, works_as_load_balancer = False): } if protocol == ProtocolType.HTTP and works_as_load_balancer: from jina.serve.runtimes.gateway.load_balancer import LoadBalancerGateway + return LoadBalancerGateway else: return gateway_dict[protocol] @@ -286,6 +287,7 @@ def _set_gateway_uses(args: 'argparse.Namespace', gateway_load_balancer: bool = elif len(args.protocol) > len(args.port): if len(args.port) == 1: from jina.helper import random_port + args.port = [] for _ in range(len(args.protocol)): args.port.append(random_port()) @@ -295,9 +297,11 @@ def _set_gateway_uses(args: 'argparse.Namespace', gateway_load_balancer: bool = ) if len(args.protocol) > 1: from jina.serve.runtimes.gateway.composite import CompositeGateway + args.uses = CompositeGateway.__name__ elif gateway_load_balancer: from jina.serve.runtimes.gateway.load_balancer import LoadBalancerGateway + args.uses = LoadBalancerGateway.__name__ @@ -341,6 +345,7 @@ def __call__(self, parser, args, values, option_string=None): :param option_string: inherited, not used """ import json + d = {0: []} for value in values: if isinstance(value, str): diff --git a/jina/parsers/orchestrate/base.py b/jina/parsers/orchestrate/base.py index 06ef6d65247e8..91bac5aba1dda 100644 --- a/jina/parsers/orchestrate/base.py +++ b/jina/parsers/orchestrate/base.py @@ -1,4 +1,5 @@ """Base argparser module for Pod and Deployment runtime""" + import argparse import os @@ -63,11 +64,13 @@ def mixin_essential_parser(parser, default_name=None): '--workspace-id', type=str, default=random_identity(), - help='the UUID for identifying the workspace. When not given a random id will be assigned.' - 'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same ' - '`workspace-id`.' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'the UUID for identifying the workspace. When not given a random id will be assigned.' + 'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same ' + '`workspace-id`.' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) @@ -89,9 +92,11 @@ def mixin_base_deployment_parser(parser, title='Base Deployment', default_name=N type=str, default=[], nargs='*', - help='Extra search paths to be used when loading modules and finding YAML config files.' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'Extra search paths to be used when loading modules and finding YAML config files.' 
+ if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( @@ -104,9 +109,11 @@ def mixin_base_deployment_parser(parser, title='Base Deployment', default_name=N gp.add_argument( '--k8s-namespace', type=str, - help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) return gp diff --git a/jina/parsers/orchestrate/deployment.py b/jina/parsers/orchestrate/deployment.py index 28f73f1d9d8b4..0199c59822678 100644 --- a/jina/parsers/orchestrate/deployment.py +++ b/jina/parsers/orchestrate/deployment.py @@ -1,4 +1,5 @@ """Argparser module for Deployment runtimes""" + import argparse from jina.enums import DeploymentRoleType @@ -58,9 +59,11 @@ def mixin_base_deployment_parser(parser): '--deployment-role', type=DeploymentRoleType.from_string, choices=list(DeploymentRoleType), - help='The role of this deployment in the flow' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'The role of this deployment in the flow' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( diff --git a/jina/parsers/orchestrate/pod.py b/jina/parsers/orchestrate/pod.py index f9927172b0245..fe908bfa463de 100644 --- a/jina/parsers/orchestrate/pod.py +++ b/jina/parsers/orchestrate/pod.py @@ -68,18 +68,22 @@ def mixin_pod_parser(parser, pod_type: str = 'worker'): action=KVAppendAction, metavar='KEY: VALUE', nargs='*', - help='The map of environment variables that are read from kubernetes cluster secrets' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'The map of environment variables that are read from kubernetes cluster secrets' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( '--image-pull-secrets', type=str, nargs='+', default=None, - help='List of ImagePullSecrets that the Kubernetes Pods need to have access to in order to pull the image. Used in `to_kubernetes_yaml`' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'List of ImagePullSecrets that the Kubernetes Pods need to have access to in order to pull the image. Used in `to_kubernetes_yaml`' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) # hidden CLI used for internal only @@ -88,9 +92,11 @@ def mixin_pod_parser(parser, pod_type: str = 'worker'): '--shard-id', type=int, default=0, - help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( @@ -98,19 +104,23 @@ def mixin_pod_parser(parser, pod_type: str = 'worker'): type=PodRoleType.from_string, choices=list(PodRoleType), default=POD_PARAMS_MAPPING[pod_type].role_type, - help='The role of this Pod in a Deployment' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'The role of this Pod in a Deployment' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( '--noblock-on-start', action='store_true', default=False, - help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on ' - '`wait_start_success` at outer function for the postpone check.' 
- if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'If set, starting a Pod/Deployment does not block the thread/process. It then relies on ' + '`wait_start_success` at outer function for the postpone check.' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) gp.add_argument( @@ -125,9 +135,11 @@ def mixin_pod_parser(parser, pod_type: str = 'worker'): '--replica-id', type=int, default=0, - help='defines the replica identifier for the executor. It is used when `stateful` is set to true' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + help=( + 'defines the replica identifier for the executor. It is used when `stateful` is set to true' + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) if pod_type != 'gateway': diff --git a/jina/parsers/orchestrate/runtimes/container.py b/jina/parsers/orchestrate/runtimes/container.py index 49c8eb536233d..7fb6780ec6bfc 100644 --- a/jina/parsers/orchestrate/runtimes/container.py +++ b/jina/parsers/orchestrate/runtimes/container.py @@ -1,4 +1,5 @@ """Argparser module for container runtimes""" + import argparse from jina.enums import DockerNetworkMode @@ -74,7 +75,8 @@ def mixin_container_runtime_parser(parser, pod_type: str = 'executor'): type=DockerNetworkMode.from_string, choices=list(DockerNetworkMode), default=DockerNetworkMode.AUTO, - help=f''' + help=( + f''' Force the use of the specified docker network mode (default: auto). Valid options are, @@ -83,6 +85,7 @@ def mixin_container_runtime_parser(parser, pod_type: str = 'executor'): - {str(DockerNetworkMode.BRIDGE)}: Use a user-defined bridge network. - {str(DockerNetworkMode.NONE)}: Use no network (equivalent to the --network=none option). ''' - if _SHOW_ALL_ARGS - else argparse.SUPPRESS, + if _SHOW_ALL_ARGS + else argparse.SUPPRESS + ), ) diff --git a/jina/parsers/orchestrate/runtimes/remote.py b/jina/parsers/orchestrate/runtimes/remote.py index 0b95d1672b8e6..98a244e4709ee 100644 --- a/jina/parsers/orchestrate/runtimes/remote.py +++ b/jina/parsers/orchestrate/runtimes/remote.py @@ -21,8 +21,8 @@ def mixin_remote_runtime_parser(parser): default=[__default_host__], action=CastHostAction, help=f'The host of the Gateway, which the client should connect to, by default it is {__default_host__}.' - ' In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts. ' - ' Then, every resulting address will be considered as one replica of the Executor.', + ' In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts. ' + ' Then, every resulting address will be considered as one replica of the Executor.', ) @@ -100,7 +100,7 @@ def mixin_gateway_streamer_parser(arg_group): '--compression', choices=['NoCompression', 'Deflate', 'Gzip'], help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, ' - 'check https://grpc.github.io/grpc/python/grpc.html#compression.', + 'check https://grpc.github.io/grpc/python/grpc.html#compression.', ) arg_group.add_argument( @@ -190,8 +190,8 @@ def _add_proxy(arg_group): action='store_true', default=False, help='If set, respect the http_proxy and https_proxy environment variables. ' - 'otherwise, it will unset these proxy variables before start. ' - 'gRPC seems to prefer no proxy', + 'otherwise, it will unset these proxy variables before start. 
' + 'gRPC seems to prefer no proxy', ) diff --git a/jina/proto/docarray_v1/pb/jina_pb2.py b/jina/proto/docarray_v1/pb/jina_pb2.py index 92af6004f8d01..28830e99b0dd0 100644 --- a/jina/proto/docarray_v1/pb/jina_pb2.py +++ b/jina/proto/docarray_v1/pb/jina_pb2.py @@ -6,6 +6,7 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,81 +18,83 @@ import docarray.proto.pb.docarray_pb2 as docarray__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\xa0\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a\x63\n\x10\x44\x61taContentProto\x12,\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb9\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12)\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 
\x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a .jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 
\x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\xa0\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a\x63\n\x10\x44\x61taContentProto\x12,\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb9\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12)\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 
\x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a .jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'jina_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _JINAINFOPROTO_JINAENTRY._options = None - _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' - _JINAINFOPROTO_ENVSENTRY._options = None - _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' - _ROUTEPROTO._serialized_start=129 - _ROUTEPROTO._serialized_end=288 - _JINAINFOPROTO._serialized_start=291 - _JINAINFOPROTO._serialized_end=486 - _JINAINFOPROTO_JINAENTRY._serialized_start=398 - _JINAINFOPROTO_JINAENTRY._serialized_end=441 - _JINAINFOPROTO_ENVSENTRY._serialized_start=443 - _JINAINFOPROTO_ENVSENTRY._serialized_end=486 - _HEADERPROTO._serialized_start=489 - _HEADERPROTO._serialized_end=687 - _ENDPOINTSPROTO._serialized_start=689 - _ENDPOINTSPROTO._serialized_end=791 - _STATUSPROTO._serialized_start=794 - _STATUSPROTO._serialized_end=1043 - _STATUSPROTO_EXCEPTIONPROTO._serialized_start=927 - _STATUSPROTO_EXCEPTIONPROTO._serialized_end=1005 - _STATUSPROTO_STATUSCODE._serialized_start=1007 - _STATUSPROTO_STATUSCODE._serialized_end=1043 - _RELATEDENTITY._serialized_start=1045 - _RELATEDENTITY._serialized_end=1139 - _DATAREQUESTPROTO._serialized_start=1142 - _DATAREQUESTPROTO._serialized_end=1430 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start=1331 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end=1430 - _SINGLEDOCUMENTREQUESTPROTO._serialized_start=1433 - _SINGLEDOCUMENTREQUESTPROTO._serialized_end=1618 - _DATAREQUESTPROTOWODATA._serialized_start=1621 - _DATAREQUESTPROTOWODATA._serialized_end=1759 - _DATAREQUESTLISTPROTO._serialized_start=1761 - _DATAREQUESTLISTPROTO._serialized_end=1825 - _SNAPSHOTID._serialized_start=1827 - _SNAPSHOTID._serialized_end=1854 - _RESTOREID._serialized_start=1856 - _RESTOREID._serialized_end=1882 - _SNAPSHOTSTATUSPROTO._serialized_start=1885 - _SNAPSHOTSTATUSPROTO._serialized_end=2124 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_start=2011 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_end=2124 - _RESTORESNAPSHOTSTATUSPROTO._serialized_start=2127 - 
_RESTORESNAPSHOTSTATUSPROTO._serialized_end=2329 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start=2243 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end=2329 - _RESTORESNAPSHOTCOMMAND._serialized_start=2331 - _RESTORESNAPSHOTCOMMAND._serialized_end=2378 - _JINADATAREQUESTRPC._serialized_start=2380 - _JINADATAREQUESTRPC._serialized_end=2470 - _JINASINGLEDATAREQUESTRPC._serialized_start=2472 - _JINASINGLEDATAREQUESTRPC._serialized_end=2571 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_start=2573 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_end=2689 - _JINARPC._serialized_start=2691 - _JINARPC._serialized_end=2762 - _JINADISCOVERENDPOINTSRPC._serialized_start=2764 - _JINADISCOVERENDPOINTSRPC._serialized_end=2860 - _JINAGATEWAYDRYRUNRPC._serialized_start=2862 - _JINAGATEWAYDRYRUNRPC._serialized_end=2940 - _JINAINFORPC._serialized_start=2942 - _JINAINFORPC._serialized_end=3013 - _JINAEXECUTORSNAPSHOT._serialized_start=3015 - _JINAEXECUTORSNAPSHOT._serialized_end=3102 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start=3104 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end=3200 - _JINAEXECUTORRESTORE._serialized_start=3202 - _JINAEXECUTORRESTORE._serialized_end=3300 - _JINAEXECUTORRESTOREPROGRESS._serialized_start=3302 - _JINAEXECUTORRESTOREPROGRESS._serialized_end=3402 + DESCRIPTOR._options = None + _JINAINFOPROTO_JINAENTRY._options = None + _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' + _JINAINFOPROTO_ENVSENTRY._options = None + _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' + _ROUTEPROTO._serialized_start = 129 + _ROUTEPROTO._serialized_end = 288 + _JINAINFOPROTO._serialized_start = 291 + _JINAINFOPROTO._serialized_end = 486 + _JINAINFOPROTO_JINAENTRY._serialized_start = 398 + _JINAINFOPROTO_JINAENTRY._serialized_end = 441 + _JINAINFOPROTO_ENVSENTRY._serialized_start = 443 + _JINAINFOPROTO_ENVSENTRY._serialized_end = 486 + _HEADERPROTO._serialized_start = 489 + _HEADERPROTO._serialized_end = 687 + _ENDPOINTSPROTO._serialized_start = 689 + _ENDPOINTSPROTO._serialized_end = 791 + _STATUSPROTO._serialized_start = 794 + _STATUSPROTO._serialized_end = 1043 + _STATUSPROTO_EXCEPTIONPROTO._serialized_start = 927 + _STATUSPROTO_EXCEPTIONPROTO._serialized_end = 1005 + _STATUSPROTO_STATUSCODE._serialized_start = 1007 + _STATUSPROTO_STATUSCODE._serialized_end = 1043 + _RELATEDENTITY._serialized_start = 1045 + _RELATEDENTITY._serialized_end = 1139 + _DATAREQUESTPROTO._serialized_start = 1142 + _DATAREQUESTPROTO._serialized_end = 1430 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start = 1331 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end = 1430 + _SINGLEDOCUMENTREQUESTPROTO._serialized_start = 1433 + _SINGLEDOCUMENTREQUESTPROTO._serialized_end = 1618 + _DATAREQUESTPROTOWODATA._serialized_start = 1621 + _DATAREQUESTPROTOWODATA._serialized_end = 1759 + _DATAREQUESTLISTPROTO._serialized_start = 1761 + _DATAREQUESTLISTPROTO._serialized_end = 1825 + _SNAPSHOTID._serialized_start = 1827 + _SNAPSHOTID._serialized_end = 1854 + _RESTOREID._serialized_start = 1856 + _RESTOREID._serialized_end = 1882 + _SNAPSHOTSTATUSPROTO._serialized_start = 1885 + _SNAPSHOTSTATUSPROTO._serialized_end = 2124 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2011 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2124 + _RESTORESNAPSHOTSTATUSPROTO._serialized_start = 2127 + _RESTORESNAPSHOTSTATUSPROTO._serialized_end = 2329 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2243 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2329 + _RESTORESNAPSHOTCOMMAND._serialized_start = 2331 + 
_RESTORESNAPSHOTCOMMAND._serialized_end = 2378 + _JINADATAREQUESTRPC._serialized_start = 2380 + _JINADATAREQUESTRPC._serialized_end = 2470 + _JINASINGLEDATAREQUESTRPC._serialized_start = 2472 + _JINASINGLEDATAREQUESTRPC._serialized_end = 2571 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_start = 2573 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_end = 2689 + _JINARPC._serialized_start = 2691 + _JINARPC._serialized_end = 2762 + _JINADISCOVERENDPOINTSRPC._serialized_start = 2764 + _JINADISCOVERENDPOINTSRPC._serialized_end = 2860 + _JINAGATEWAYDRYRUNRPC._serialized_start = 2862 + _JINAGATEWAYDRYRUNRPC._serialized_end = 2940 + _JINAINFORPC._serialized_start = 2942 + _JINAINFORPC._serialized_end = 3013 + _JINAEXECUTORSNAPSHOT._serialized_start = 3015 + _JINAEXECUTORSNAPSHOT._serialized_end = 3102 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start = 3104 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end = 3200 + _JINAEXECUTORRESTORE._serialized_start = 3202 + _JINAEXECUTORRESTORE._serialized_end = 3300 + _JINAEXECUTORRESTOREPROGRESS._serialized_start = 3302 + _JINAEXECUTORRESTOREPROGRESS._serialized_end = 3402 # @@protoc_insertion_point(module_scope) diff --git a/jina/proto/docarray_v1/pb/jina_pb2_grpc.py b/jina/proto/docarray_v1/pb/jina_pb2_grpc.py index f52ce19e69412..f571beae83675 100644 --- a/jina/proto/docarray_v1/pb/jina_pb2_grpc.py +++ b/jina/proto/docarray_v1/pb/jina_pb2_grpc.py @@ -18,10 +18,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.process_data = channel.unary_unary( - '/jina.JinaDataRequestRPC/process_data', - request_serializer=jina__pb2.DataRequestListProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaDataRequestRPC/process_data', + request_serializer=jina__pb2.DataRequestListProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaDataRequestRPCServicer(object): @@ -30,8 +30,7 @@ class JinaDataRequestRPCServicer(object): """ def process_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -39,39 +38,52 @@ def process_data(self, request, context): def add_JinaDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_data': grpc.unary_unary_rpc_method_handler( - servicer.process_data, - request_deserializer=jina__pb2.DataRequestListProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_data': grpc.unary_unary_rpc_method_handler( + servicer.process_data, + request_deserializer=jina__pb2.DataRequestListProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDataRequestRPC', rpc_method_handlers) + 'jina.JinaDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDataRequestRPC(object): """* jina gRPC service for DataRequests. 
""" @staticmethod - def process_data(request, + def process_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDataRequestRPC/process_data', + '/jina.JinaDataRequestRPC/process_data', jina__pb2.DataRequestListProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDataRequestRPCStub(object): @@ -87,10 +99,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.process_single_data = channel.unary_unary( - '/jina.JinaSingleDataRequestRPC/process_single_data', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaSingleDataRequestRPC/process_single_data', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaSingleDataRequestRPCServicer(object): @@ -100,8 +112,7 @@ class JinaSingleDataRequestRPCServicer(object): """ def process_single_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -109,18 +120,19 @@ def process_single_data(self, request, context): def add_JinaSingleDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_single_data': grpc.unary_unary_rpc_method_handler( - servicer.process_single_data, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_single_data': grpc.unary_unary_rpc_method_handler( + servicer.process_single_data, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDataRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDataRequestRPC(object): """* jina gRPC service for DataRequests. 
@@ -128,21 +140,33 @@ class JinaSingleDataRequestRPC(object): """ @staticmethod - def process_single_data(request, + def process_single_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaSingleDataRequestRPC/process_single_data', + '/jina.JinaSingleDataRequestRPC/process_single_data', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDocumentRequestRPCStub(object): @@ -158,10 +182,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.stream_doc = channel.unary_stream( - '/jina.JinaSingleDocumentRequestRPC/stream_doc', - request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - ) + '/jina.JinaSingleDocumentRequestRPC/stream_doc', + request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + ) class JinaSingleDocumentRequestRPCServicer(object): @@ -171,8 +195,7 @@ class JinaSingleDocumentRequestRPCServicer(object): """ def stream_doc(self, request, context): - """Used for streaming one document to the Executors - """ + """Used for streaming one document to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -180,18 +203,19 @@ def stream_doc(self, request, context): def add_JinaSingleDocumentRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'stream_doc': grpc.unary_stream_rpc_method_handler( - servicer.stream_doc, - request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - ), + 'stream_doc': grpc.unary_stream_rpc_method_handler( + servicer.stream_doc, + request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDocumentRequestRPC(object): """* jina gRPC service for DataRequests. 
@@ -199,21 +223,33 @@ class JinaSingleDocumentRequestRPC(object): """ @staticmethod - def stream_doc(request, + def stream_doc( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream(request, target, '/jina.JinaSingleDocumentRequestRPC/stream_doc', + '/jina.JinaSingleDocumentRequestRPC/stream_doc', jina__pb2.SingleDocumentRequestProto.SerializeToString, jina__pb2.SingleDocumentRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaRPCStub(object): @@ -228,10 +264,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.Call = channel.stream_stream( - '/jina.JinaRPC/Call', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaRPC/Call', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaRPCServicer(object): @@ -240,8 +276,7 @@ class JinaRPCServicer(object): """ def Call(self, request_iterator, context): - """Pass in a Request and a filled Request with matches will be returned. - """ + """Pass in a Request and a filled Request with matches will be returned.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -249,39 +284,52 @@ def Call(self, request_iterator, context): def add_JinaRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'Call': grpc.stream_stream_rpc_method_handler( - servicer.Call, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'Call': grpc.stream_stream_rpc_method_handler( + servicer.Call, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaRPC', rpc_method_handlers) + 'jina.JinaRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaRPC(object): """* jina streaming gRPC service. 
""" @staticmethod - def Call(request_iterator, + def Call( + request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.stream_stream( + request_iterator, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/jina.JinaRPC/Call', + '/jina.JinaRPC/Call', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaDiscoverEndpointsRPCStub(object): @@ -296,10 +344,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.endpoint_discovery = channel.unary_unary( - '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.EndpointsProto.FromString, - ) + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.EndpointsProto.FromString, + ) class JinaDiscoverEndpointsRPCServicer(object): @@ -316,39 +364,52 @@ def endpoint_discovery(self, request, context): def add_JinaDiscoverEndpointsRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( - servicer.endpoint_discovery, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.EndpointsProto.SerializeToString, - ), + 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( + servicer.endpoint_discovery, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.EndpointsProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers) + 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDiscoverEndpointsRPC(object): """* jina gRPC service to expose Endpoints from Executors. 
""" @staticmethod - def endpoint_discovery(request, + def endpoint_discovery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.EndpointsProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaGatewayDryRunRPCStub(object): @@ -363,10 +424,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.dry_run = channel.unary_unary( - '/jina.JinaGatewayDryRunRPC/dry_run', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.StatusProto.FromString, - ) + '/jina.JinaGatewayDryRunRPC/dry_run', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.StatusProto.FromString, + ) class JinaGatewayDryRunRPCServicer(object): @@ -383,39 +444,52 @@ def dry_run(self, request, context): def add_JinaGatewayDryRunRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'dry_run': grpc.unary_unary_rpc_method_handler( - servicer.dry_run, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.StatusProto.SerializeToString, - ), + 'dry_run': grpc.unary_unary_rpc_method_handler( + servicer.dry_run, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.StatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaGatewayDryRunRPC', rpc_method_handlers) + 'jina.JinaGatewayDryRunRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaGatewayDryRunRPC(object): """* jina gRPC service to expose Endpoints from Executors. 
""" @staticmethod - def dry_run(request, + def dry_run( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaGatewayDryRunRPC/dry_run', + '/jina.JinaGatewayDryRunRPC/dry_run', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.StatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaInfoRPCStub(object): @@ -430,10 +504,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self._status = channel.unary_unary( - '/jina.JinaInfoRPC/_status', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.JinaInfoProto.FromString, - ) + '/jina.JinaInfoRPC/_status', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.JinaInfoProto.FromString, + ) class JinaInfoRPCServicer(object): @@ -450,39 +524,52 @@ def _status(self, request, context): def add_JinaInfoRPCServicer_to_server(servicer, server): rpc_method_handlers = { - '_status': grpc.unary_unary_rpc_method_handler( - servicer._status, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.JinaInfoProto.SerializeToString, - ), + '_status': grpc.unary_unary_rpc_method_handler( + servicer._status, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.JinaInfoProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaInfoRPC', rpc_method_handlers) + 'jina.JinaInfoRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaInfoRPC(object): """* jina gRPC service to expose information about running jina version and environment. """ @staticmethod - def _status(request, + def _status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaInfoRPC/_status', + '/jina.JinaInfoRPC/_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.JinaInfoProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotStub(object): @@ -497,10 +584,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot = channel.unary_unary( - '/jina.JinaExecutorSnapshot/snapshot', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshot/snapshot', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotServicer(object): @@ -517,39 +604,52 @@ def snapshot(self, request, context): def add_JinaExecutorSnapshotServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot': grpc.unary_unary_rpc_method_handler( - servicer.snapshot, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot': grpc.unary_unary_rpc_method_handler( + servicer.snapshot, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshot', rpc_method_handlers) + 'jina.JinaExecutorSnapshot', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshot(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot(request, + def snapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshot/snapshot', + '/jina.JinaExecutorSnapshot/snapshot', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotProgressStub(object): @@ -564,10 +664,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot_status = channel.unary_unary( - '/jina.JinaExecutorSnapshotProgress/snapshot_status', - request_serializer=jina__pb2.SnapshotId.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshotProgress/snapshot_status', + request_serializer=jina__pb2.SnapshotId.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotProgressServicer(object): @@ -584,39 +684,52 @@ def snapshot_status(self, request, context): def add_JinaExecutorSnapshotProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot_status': grpc.unary_unary_rpc_method_handler( - servicer.snapshot_status, - request_deserializer=jina__pb2.SnapshotId.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot_status': grpc.unary_unary_rpc_method_handler( + servicer.snapshot_status, + request_deserializer=jina__pb2.SnapshotId.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers) + 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshotProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot_status(request, + def snapshot_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshotProgress/snapshot_status', + '/jina.JinaExecutorSnapshotProgress/snapshot_status', jina__pb2.SnapshotId.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreStub(object): @@ -631,10 +744,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore = channel.unary_unary( - '/jina.JinaExecutorRestore/restore', - request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestore/restore', + request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreServicer(object): @@ -651,39 +764,52 @@ def restore(self, request, context): def add_JinaExecutorRestoreServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore': grpc.unary_unary_rpc_method_handler( - servicer.restore, - request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore': grpc.unary_unary_rpc_method_handler( + servicer.restore, + request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestore', rpc_method_handlers) + 'jina.JinaExecutorRestore', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestore(object): """* jina gRPC service to trigger a restore at the Executor Runtime. """ @staticmethod - def restore(request, + def restore( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestore/restore', + '/jina.JinaExecutorRestore/restore', jina__pb2.RestoreSnapshotCommand.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreProgressStub(object): @@ -698,10 +824,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore_status = channel.unary_unary( - '/jina.JinaExecutorRestoreProgress/restore_status', - request_serializer=jina__pb2.RestoreId.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestoreProgress/restore_status', + request_serializer=jina__pb2.RestoreId.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreProgressServicer(object): @@ -718,36 +844,49 @@ def restore_status(self, request, context): def add_JinaExecutorRestoreProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore_status': grpc.unary_unary_rpc_method_handler( - servicer.restore_status, - request_deserializer=jina__pb2.RestoreId.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore_status': grpc.unary_unary_rpc_method_handler( + servicer.restore_status, + request_deserializer=jina__pb2.RestoreId.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestoreProgress', rpc_method_handlers) + 'jina.JinaExecutorRestoreProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestoreProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def restore_status(request, + def restore_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestoreProgress/restore_status', + '/jina.JinaExecutorRestoreProgress/restore_status', jina__pb2.RestoreId.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/proto/docarray_v1/pb2/jina_pb2.py b/jina/proto/docarray_v1/pb2/jina_pb2.py index a768f5f0b7c58..1f11d28e5f226 100644 --- a/jina/proto/docarray_v1/pb2/jina_pb2.py +++ b/jina/proto/docarray_v1/pb2/jina_pb2.py @@ -7,6 +7,7 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,8 +19,9 @@ import docarray.proto.pb2.docarray_pb2 as docarray__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 
\x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\xa0\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a\x63\n\x10\x44\x61taContentProto\x12,\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb9\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12)\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 
\x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a .jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3') - +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 
\x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\xa0\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a\x63\n\x10\x44\x61taContentProto\x12,\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb9\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12)\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a 
.jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3' +) _ROUTEPROTO = DESCRIPTOR.message_types_by_name['RouteProto'] @@ -32,237 +34,319 @@ _STATUSPROTO_EXCEPTIONPROTO = _STATUSPROTO.nested_types_by_name['ExceptionProto'] _RELATEDENTITY = DESCRIPTOR.message_types_by_name['RelatedEntity'] _DATAREQUESTPROTO = DESCRIPTOR.message_types_by_name['DataRequestProto'] -_DATAREQUESTPROTO_DATACONTENTPROTO = _DATAREQUESTPROTO.nested_types_by_name['DataContentProto'] -_SINGLEDOCUMENTREQUESTPROTO = DESCRIPTOR.message_types_by_name['SingleDocumentRequestProto'] +_DATAREQUESTPROTO_DATACONTENTPROTO = _DATAREQUESTPROTO.nested_types_by_name[ + 'DataContentProto' +] +_SINGLEDOCUMENTREQUESTPROTO = DESCRIPTOR.message_types_by_name[ + 'SingleDocumentRequestProto' +] _DATAREQUESTPROTOWODATA = DESCRIPTOR.message_types_by_name['DataRequestProtoWoData'] _DATAREQUESTLISTPROTO = DESCRIPTOR.message_types_by_name['DataRequestListProto'] _SNAPSHOTID = DESCRIPTOR.message_types_by_name['SnapshotId'] _RESTOREID = DESCRIPTOR.message_types_by_name['RestoreId'] _SNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name['SnapshotStatusProto'] -_RESTORESNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name['RestoreSnapshotStatusProto'] +_RESTORESNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name[ + 'RestoreSnapshotStatusProto' +] _RESTORESNAPSHOTCOMMAND = DESCRIPTOR.message_types_by_name['RestoreSnapshotCommand'] _STATUSPROTO_STATUSCODE = _STATUSPROTO.enum_types_by_name['StatusCode'] _SNAPSHOTSTATUSPROTO_STATUS = _SNAPSHOTSTATUSPROTO.enum_types_by_name['Status'] -_RESTORESNAPSHOTSTATUSPROTO_STATUS = _RESTORESNAPSHOTSTATUSPROTO.enum_types_by_name['Status'] -RouteProto = _reflection.GeneratedProtocolMessageType('RouteProto', (_message.Message,), { - 'DESCRIPTOR' : _ROUTEPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RouteProto) - }) +_RESTORESNAPSHOTSTATUSPROTO_STATUS = _RESTORESNAPSHOTSTATUSPROTO.enum_types_by_name[ + 'Status' +] +RouteProto = _reflection.GeneratedProtocolMessageType( + 'RouteProto', + (_message.Message,), + { + 'DESCRIPTOR': _ROUTEPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RouteProto) + }, +) _sym_db.RegisterMessage(RouteProto) -JinaInfoProto = _reflection.GeneratedProtocolMessageType('JinaInfoProto', (_message.Message,), { - - 'JinaEntry' : _reflection.GeneratedProtocolMessageType('JinaEntry', (_message.Message,), { - 'DESCRIPTOR' : _JINAINFOPROTO_JINAENTRY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.JinaEntry) - }) - , - 
- 'EnvsEntry' : _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), { - 'DESCRIPTOR' : _JINAINFOPROTO_ENVSENTRY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.EnvsEntry) - }) - , - 'DESCRIPTOR' : _JINAINFOPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto) - }) +JinaInfoProto = _reflection.GeneratedProtocolMessageType( + 'JinaInfoProto', + (_message.Message,), + { + 'JinaEntry': _reflection.GeneratedProtocolMessageType( + 'JinaEntry', + (_message.Message,), + { + 'DESCRIPTOR': _JINAINFOPROTO_JINAENTRY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.JinaEntry) + }, + ), + 'EnvsEntry': _reflection.GeneratedProtocolMessageType( + 'EnvsEntry', + (_message.Message,), + { + 'DESCRIPTOR': _JINAINFOPROTO_ENVSENTRY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.EnvsEntry) + }, + ), + 'DESCRIPTOR': _JINAINFOPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto) + }, +) _sym_db.RegisterMessage(JinaInfoProto) _sym_db.RegisterMessage(JinaInfoProto.JinaEntry) _sym_db.RegisterMessage(JinaInfoProto.EnvsEntry) -HeaderProto = _reflection.GeneratedProtocolMessageType('HeaderProto', (_message.Message,), { - 'DESCRIPTOR' : _HEADERPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.HeaderProto) - }) +HeaderProto = _reflection.GeneratedProtocolMessageType( + 'HeaderProto', + (_message.Message,), + { + 'DESCRIPTOR': _HEADERPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.HeaderProto) + }, +) _sym_db.RegisterMessage(HeaderProto) -EndpointsProto = _reflection.GeneratedProtocolMessageType('EndpointsProto', (_message.Message,), { - 'DESCRIPTOR' : _ENDPOINTSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.EndpointsProto) - }) +EndpointsProto = _reflection.GeneratedProtocolMessageType( + 'EndpointsProto', + (_message.Message,), + { + 'DESCRIPTOR': _ENDPOINTSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.EndpointsProto) + }, +) _sym_db.RegisterMessage(EndpointsProto) -StatusProto = _reflection.GeneratedProtocolMessageType('StatusProto', (_message.Message,), { - - 'ExceptionProto' : _reflection.GeneratedProtocolMessageType('ExceptionProto', (_message.Message,), { - 'DESCRIPTOR' : _STATUSPROTO_EXCEPTIONPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.StatusProto.ExceptionProto) - }) - , - 'DESCRIPTOR' : _STATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.StatusProto) - }) +StatusProto = _reflection.GeneratedProtocolMessageType( + 'StatusProto', + (_message.Message,), + { + 'ExceptionProto': _reflection.GeneratedProtocolMessageType( + 'ExceptionProto', + (_message.Message,), + { + 'DESCRIPTOR': _STATUSPROTO_EXCEPTIONPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.StatusProto.ExceptionProto) + }, + ), + 'DESCRIPTOR': _STATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.StatusProto) + }, +) _sym_db.RegisterMessage(StatusProto) _sym_db.RegisterMessage(StatusProto.ExceptionProto) -RelatedEntity = _reflection.GeneratedProtocolMessageType('RelatedEntity', (_message.Message,), { - 'DESCRIPTOR' : _RELATEDENTITY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RelatedEntity) - }) +RelatedEntity = 
_reflection.GeneratedProtocolMessageType( + 'RelatedEntity', + (_message.Message,), + { + 'DESCRIPTOR': _RELATEDENTITY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RelatedEntity) + }, +) _sym_db.RegisterMessage(RelatedEntity) -DataRequestProto = _reflection.GeneratedProtocolMessageType('DataRequestProto', (_message.Message,), { - - 'DataContentProto' : _reflection.GeneratedProtocolMessageType('DataContentProto', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTPROTO_DATACONTENTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProto.DataContentProto) - }) - , - 'DESCRIPTOR' : _DATAREQUESTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProto) - }) +DataRequestProto = _reflection.GeneratedProtocolMessageType( + 'DataRequestProto', + (_message.Message,), + { + 'DataContentProto': _reflection.GeneratedProtocolMessageType( + 'DataContentProto', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTPROTO_DATACONTENTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProto.DataContentProto) + }, + ), + 'DESCRIPTOR': _DATAREQUESTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProto) + }, +) _sym_db.RegisterMessage(DataRequestProto) _sym_db.RegisterMessage(DataRequestProto.DataContentProto) -SingleDocumentRequestProto = _reflection.GeneratedProtocolMessageType('SingleDocumentRequestProto', (_message.Message,), { - 'DESCRIPTOR' : _SINGLEDOCUMENTREQUESTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SingleDocumentRequestProto) - }) +SingleDocumentRequestProto = _reflection.GeneratedProtocolMessageType( + 'SingleDocumentRequestProto', + (_message.Message,), + { + 'DESCRIPTOR': _SINGLEDOCUMENTREQUESTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.SingleDocumentRequestProto) + }, +) _sym_db.RegisterMessage(SingleDocumentRequestProto) -DataRequestProtoWoData = _reflection.GeneratedProtocolMessageType('DataRequestProtoWoData', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTPROTOWODATA, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProtoWoData) - }) +DataRequestProtoWoData = _reflection.GeneratedProtocolMessageType( + 'DataRequestProtoWoData', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTPROTOWODATA, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProtoWoData) + }, +) _sym_db.RegisterMessage(DataRequestProtoWoData) -DataRequestListProto = _reflection.GeneratedProtocolMessageType('DataRequestListProto', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTLISTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestListProto) - }) +DataRequestListProto = _reflection.GeneratedProtocolMessageType( + 'DataRequestListProto', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTLISTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestListProto) + }, +) _sym_db.RegisterMessage(DataRequestListProto) -SnapshotId = _reflection.GeneratedProtocolMessageType('SnapshotId', (_message.Message,), { - 'DESCRIPTOR' : _SNAPSHOTID, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SnapshotId) - }) +SnapshotId = _reflection.GeneratedProtocolMessageType( + 'SnapshotId', + (_message.Message,), + { + 'DESCRIPTOR': _SNAPSHOTID, + '__module__': 'jina_pb2', + # 
@@protoc_insertion_point(class_scope:jina.SnapshotId) + }, +) _sym_db.RegisterMessage(SnapshotId) -RestoreId = _reflection.GeneratedProtocolMessageType('RestoreId', (_message.Message,), { - 'DESCRIPTOR' : _RESTOREID, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreId) - }) +RestoreId = _reflection.GeneratedProtocolMessageType( + 'RestoreId', + (_message.Message,), + { + 'DESCRIPTOR': _RESTOREID, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreId) + }, +) _sym_db.RegisterMessage(RestoreId) -SnapshotStatusProto = _reflection.GeneratedProtocolMessageType('SnapshotStatusProto', (_message.Message,), { - 'DESCRIPTOR' : _SNAPSHOTSTATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SnapshotStatusProto) - }) +SnapshotStatusProto = _reflection.GeneratedProtocolMessageType( + 'SnapshotStatusProto', + (_message.Message,), + { + 'DESCRIPTOR': _SNAPSHOTSTATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.SnapshotStatusProto) + }, +) _sym_db.RegisterMessage(SnapshotStatusProto) -RestoreSnapshotStatusProto = _reflection.GeneratedProtocolMessageType('RestoreSnapshotStatusProto', (_message.Message,), { - 'DESCRIPTOR' : _RESTORESNAPSHOTSTATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotStatusProto) - }) +RestoreSnapshotStatusProto = _reflection.GeneratedProtocolMessageType( + 'RestoreSnapshotStatusProto', + (_message.Message,), + { + 'DESCRIPTOR': _RESTORESNAPSHOTSTATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotStatusProto) + }, +) _sym_db.RegisterMessage(RestoreSnapshotStatusProto) -RestoreSnapshotCommand = _reflection.GeneratedProtocolMessageType('RestoreSnapshotCommand', (_message.Message,), { - 'DESCRIPTOR' : _RESTORESNAPSHOTCOMMAND, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotCommand) - }) +RestoreSnapshotCommand = _reflection.GeneratedProtocolMessageType( + 'RestoreSnapshotCommand', + (_message.Message,), + { + 'DESCRIPTOR': _RESTORESNAPSHOTCOMMAND, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotCommand) + }, +) _sym_db.RegisterMessage(RestoreSnapshotCommand) _JINADATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaDataRequestRPC'] _JINASINGLEDATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC'] -_JINASINGLEDOCUMENTREQUESTRPC = DESCRIPTOR.services_by_name['JinaSingleDocumentRequestRPC'] +_JINASINGLEDOCUMENTREQUESTRPC = DESCRIPTOR.services_by_name[ + 'JinaSingleDocumentRequestRPC' +] _JINARPC = DESCRIPTOR.services_by_name['JinaRPC'] _JINADISCOVERENDPOINTSRPC = DESCRIPTOR.services_by_name['JinaDiscoverEndpointsRPC'] _JINAGATEWAYDRYRUNRPC = DESCRIPTOR.services_by_name['JinaGatewayDryRunRPC'] _JINAINFORPC = DESCRIPTOR.services_by_name['JinaInfoRPC'] _JINAEXECUTORSNAPSHOT = DESCRIPTOR.services_by_name['JinaExecutorSnapshot'] -_JINAEXECUTORSNAPSHOTPROGRESS = DESCRIPTOR.services_by_name['JinaExecutorSnapshotProgress'] +_JINAEXECUTORSNAPSHOTPROGRESS = DESCRIPTOR.services_by_name[ + 'JinaExecutorSnapshotProgress' +] _JINAEXECUTORRESTORE = DESCRIPTOR.services_by_name['JinaExecutorRestore'] -_JINAEXECUTORRESTOREPROGRESS = DESCRIPTOR.services_by_name['JinaExecutorRestoreProgress'] +_JINAEXECUTORRESTOREPROGRESS = DESCRIPTOR.services_by_name[ + 'JinaExecutorRestoreProgress' +] if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - 
_JINAINFOPROTO_JINAENTRY._options = None - _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' - _JINAINFOPROTO_ENVSENTRY._options = None - _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' - _ROUTEPROTO._serialized_start=129 - _ROUTEPROTO._serialized_end=288 - _JINAINFOPROTO._serialized_start=291 - _JINAINFOPROTO._serialized_end=486 - _JINAINFOPROTO_JINAENTRY._serialized_start=398 - _JINAINFOPROTO_JINAENTRY._serialized_end=441 - _JINAINFOPROTO_ENVSENTRY._serialized_start=443 - _JINAINFOPROTO_ENVSENTRY._serialized_end=486 - _HEADERPROTO._serialized_start=489 - _HEADERPROTO._serialized_end=687 - _ENDPOINTSPROTO._serialized_start=689 - _ENDPOINTSPROTO._serialized_end=791 - _STATUSPROTO._serialized_start=794 - _STATUSPROTO._serialized_end=1043 - _STATUSPROTO_EXCEPTIONPROTO._serialized_start=927 - _STATUSPROTO_EXCEPTIONPROTO._serialized_end=1005 - _STATUSPROTO_STATUSCODE._serialized_start=1007 - _STATUSPROTO_STATUSCODE._serialized_end=1043 - _RELATEDENTITY._serialized_start=1045 - _RELATEDENTITY._serialized_end=1139 - _DATAREQUESTPROTO._serialized_start=1142 - _DATAREQUESTPROTO._serialized_end=1430 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start=1331 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end=1430 - _SINGLEDOCUMENTREQUESTPROTO._serialized_start=1433 - _SINGLEDOCUMENTREQUESTPROTO._serialized_end=1618 - _DATAREQUESTPROTOWODATA._serialized_start=1621 - _DATAREQUESTPROTOWODATA._serialized_end=1759 - _DATAREQUESTLISTPROTO._serialized_start=1761 - _DATAREQUESTLISTPROTO._serialized_end=1825 - _SNAPSHOTID._serialized_start=1827 - _SNAPSHOTID._serialized_end=1854 - _RESTOREID._serialized_start=1856 - _RESTOREID._serialized_end=1882 - _SNAPSHOTSTATUSPROTO._serialized_start=1885 - _SNAPSHOTSTATUSPROTO._serialized_end=2124 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_start=2011 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_end=2124 - _RESTORESNAPSHOTSTATUSPROTO._serialized_start=2127 - _RESTORESNAPSHOTSTATUSPROTO._serialized_end=2329 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start=2243 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end=2329 - _RESTORESNAPSHOTCOMMAND._serialized_start=2331 - _RESTORESNAPSHOTCOMMAND._serialized_end=2378 - _JINADATAREQUESTRPC._serialized_start=2380 - _JINADATAREQUESTRPC._serialized_end=2470 - _JINASINGLEDATAREQUESTRPC._serialized_start=2472 - _JINASINGLEDATAREQUESTRPC._serialized_end=2571 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_start=2573 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_end=2689 - _JINARPC._serialized_start=2691 - _JINARPC._serialized_end=2762 - _JINADISCOVERENDPOINTSRPC._serialized_start=2764 - _JINADISCOVERENDPOINTSRPC._serialized_end=2860 - _JINAGATEWAYDRYRUNRPC._serialized_start=2862 - _JINAGATEWAYDRYRUNRPC._serialized_end=2940 - _JINAINFORPC._serialized_start=2942 - _JINAINFORPC._serialized_end=3013 - _JINAEXECUTORSNAPSHOT._serialized_start=3015 - _JINAEXECUTORSNAPSHOT._serialized_end=3102 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start=3104 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end=3200 - _JINAEXECUTORRESTORE._serialized_start=3202 - _JINAEXECUTORRESTORE._serialized_end=3300 - _JINAEXECUTORRESTOREPROGRESS._serialized_start=3302 - _JINAEXECUTORRESTOREPROGRESS._serialized_end=3402 + DESCRIPTOR._options = None + _JINAINFOPROTO_JINAENTRY._options = None + _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' + _JINAINFOPROTO_ENVSENTRY._options = None + _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' + _ROUTEPROTO._serialized_start = 129 + _ROUTEPROTO._serialized_end = 288 + _JINAINFOPROTO._serialized_start = 
291 + _JINAINFOPROTO._serialized_end = 486 + _JINAINFOPROTO_JINAENTRY._serialized_start = 398 + _JINAINFOPROTO_JINAENTRY._serialized_end = 441 + _JINAINFOPROTO_ENVSENTRY._serialized_start = 443 + _JINAINFOPROTO_ENVSENTRY._serialized_end = 486 + _HEADERPROTO._serialized_start = 489 + _HEADERPROTO._serialized_end = 687 + _ENDPOINTSPROTO._serialized_start = 689 + _ENDPOINTSPROTO._serialized_end = 791 + _STATUSPROTO._serialized_start = 794 + _STATUSPROTO._serialized_end = 1043 + _STATUSPROTO_EXCEPTIONPROTO._serialized_start = 927 + _STATUSPROTO_EXCEPTIONPROTO._serialized_end = 1005 + _STATUSPROTO_STATUSCODE._serialized_start = 1007 + _STATUSPROTO_STATUSCODE._serialized_end = 1043 + _RELATEDENTITY._serialized_start = 1045 + _RELATEDENTITY._serialized_end = 1139 + _DATAREQUESTPROTO._serialized_start = 1142 + _DATAREQUESTPROTO._serialized_end = 1430 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start = 1331 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end = 1430 + _SINGLEDOCUMENTREQUESTPROTO._serialized_start = 1433 + _SINGLEDOCUMENTREQUESTPROTO._serialized_end = 1618 + _DATAREQUESTPROTOWODATA._serialized_start = 1621 + _DATAREQUESTPROTOWODATA._serialized_end = 1759 + _DATAREQUESTLISTPROTO._serialized_start = 1761 + _DATAREQUESTLISTPROTO._serialized_end = 1825 + _SNAPSHOTID._serialized_start = 1827 + _SNAPSHOTID._serialized_end = 1854 + _RESTOREID._serialized_start = 1856 + _RESTOREID._serialized_end = 1882 + _SNAPSHOTSTATUSPROTO._serialized_start = 1885 + _SNAPSHOTSTATUSPROTO._serialized_end = 2124 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2011 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2124 + _RESTORESNAPSHOTSTATUSPROTO._serialized_start = 2127 + _RESTORESNAPSHOTSTATUSPROTO._serialized_end = 2329 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2243 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2329 + _RESTORESNAPSHOTCOMMAND._serialized_start = 2331 + _RESTORESNAPSHOTCOMMAND._serialized_end = 2378 + _JINADATAREQUESTRPC._serialized_start = 2380 + _JINADATAREQUESTRPC._serialized_end = 2470 + _JINASINGLEDATAREQUESTRPC._serialized_start = 2472 + _JINASINGLEDATAREQUESTRPC._serialized_end = 2571 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_start = 2573 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_end = 2689 + _JINARPC._serialized_start = 2691 + _JINARPC._serialized_end = 2762 + _JINADISCOVERENDPOINTSRPC._serialized_start = 2764 + _JINADISCOVERENDPOINTSRPC._serialized_end = 2860 + _JINAGATEWAYDRYRUNRPC._serialized_start = 2862 + _JINAGATEWAYDRYRUNRPC._serialized_end = 2940 + _JINAINFORPC._serialized_start = 2942 + _JINAINFORPC._serialized_end = 3013 + _JINAEXECUTORSNAPSHOT._serialized_start = 3015 + _JINAEXECUTORSNAPSHOT._serialized_end = 3102 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start = 3104 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end = 3200 + _JINAEXECUTORRESTORE._serialized_start = 3202 + _JINAEXECUTORRESTORE._serialized_end = 3300 + _JINAEXECUTORRESTOREPROGRESS._serialized_start = 3302 + _JINAEXECUTORRESTOREPROGRESS._serialized_end = 3402 # @@protoc_insertion_point(module_scope) diff --git a/jina/proto/docarray_v1/pb2/jina_pb2_grpc.py b/jina/proto/docarray_v1/pb2/jina_pb2_grpc.py index f52ce19e69412..f571beae83675 100644 --- a/jina/proto/docarray_v1/pb2/jina_pb2_grpc.py +++ b/jina/proto/docarray_v1/pb2/jina_pb2_grpc.py @@ -18,10 +18,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.process_data = channel.unary_unary( - '/jina.JinaDataRequestRPC/process_data', - request_serializer=jina__pb2.DataRequestListProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaDataRequestRPC/process_data', + request_serializer=jina__pb2.DataRequestListProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaDataRequestRPCServicer(object): @@ -30,8 +30,7 @@ class JinaDataRequestRPCServicer(object): """ def process_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -39,39 +38,52 @@ def process_data(self, request, context): def add_JinaDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_data': grpc.unary_unary_rpc_method_handler( - servicer.process_data, - request_deserializer=jina__pb2.DataRequestListProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_data': grpc.unary_unary_rpc_method_handler( + servicer.process_data, + request_deserializer=jina__pb2.DataRequestListProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDataRequestRPC', rpc_method_handlers) + 'jina.JinaDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDataRequestRPC(object): """* jina gRPC service for DataRequests. """ @staticmethod - def process_data(request, + def process_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDataRequestRPC/process_data', + '/jina.JinaDataRequestRPC/process_data', jina__pb2.DataRequestListProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDataRequestRPCStub(object): @@ -87,10 +99,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.process_single_data = channel.unary_unary( - '/jina.JinaSingleDataRequestRPC/process_single_data', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaSingleDataRequestRPC/process_single_data', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaSingleDataRequestRPCServicer(object): @@ -100,8 +112,7 @@ class JinaSingleDataRequestRPCServicer(object): """ def process_single_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -109,18 +120,19 @@ def process_single_data(self, request, context): def add_JinaSingleDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_single_data': grpc.unary_unary_rpc_method_handler( - servicer.process_single_data, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_single_data': grpc.unary_unary_rpc_method_handler( + servicer.process_single_data, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDataRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDataRequestRPC(object): """* jina gRPC service for DataRequests. @@ -128,21 +140,33 @@ class JinaSingleDataRequestRPC(object): """ @staticmethod - def process_single_data(request, + def process_single_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaSingleDataRequestRPC/process_single_data', + '/jina.JinaSingleDataRequestRPC/process_single_data', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDocumentRequestRPCStub(object): @@ -158,10 +182,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.stream_doc = channel.unary_stream( - '/jina.JinaSingleDocumentRequestRPC/stream_doc', - request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - ) + '/jina.JinaSingleDocumentRequestRPC/stream_doc', + request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + ) class JinaSingleDocumentRequestRPCServicer(object): @@ -171,8 +195,7 @@ class JinaSingleDocumentRequestRPCServicer(object): """ def stream_doc(self, request, context): - """Used for streaming one document to the Executors - """ + """Used for streaming one document to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -180,18 +203,19 @@ def stream_doc(self, request, context): def add_JinaSingleDocumentRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'stream_doc': grpc.unary_stream_rpc_method_handler( - servicer.stream_doc, - request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - ), + 'stream_doc': grpc.unary_stream_rpc_method_handler( + servicer.stream_doc, + request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDocumentRequestRPC(object): """* jina gRPC service for DataRequests. @@ -199,21 +223,33 @@ class JinaSingleDocumentRequestRPC(object): """ @staticmethod - def stream_doc(request, + def stream_doc( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream(request, target, '/jina.JinaSingleDocumentRequestRPC/stream_doc', + '/jina.JinaSingleDocumentRequestRPC/stream_doc', jina__pb2.SingleDocumentRequestProto.SerializeToString, jina__pb2.SingleDocumentRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaRPCStub(object): @@ -228,10 +264,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.Call = channel.stream_stream( - '/jina.JinaRPC/Call', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaRPC/Call', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaRPCServicer(object): @@ -240,8 +276,7 @@ class JinaRPCServicer(object): """ def Call(self, request_iterator, context): - """Pass in a Request and a filled Request with matches will be returned. - """ + """Pass in a Request and a filled Request with matches will be returned.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -249,39 +284,52 @@ def Call(self, request_iterator, context): def add_JinaRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'Call': grpc.stream_stream_rpc_method_handler( - servicer.Call, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'Call': grpc.stream_stream_rpc_method_handler( + servicer.Call, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaRPC', rpc_method_handlers) + 'jina.JinaRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaRPC(object): """* jina streaming gRPC service. """ @staticmethod - def Call(request_iterator, + def Call( + request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.stream_stream( + request_iterator, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/jina.JinaRPC/Call', + '/jina.JinaRPC/Call', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaDiscoverEndpointsRPCStub(object): @@ -296,10 +344,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.endpoint_discovery = channel.unary_unary( - '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.EndpointsProto.FromString, - ) + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.EndpointsProto.FromString, + ) class JinaDiscoverEndpointsRPCServicer(object): @@ -316,39 +364,52 @@ def endpoint_discovery(self, request, context): def add_JinaDiscoverEndpointsRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( - servicer.endpoint_discovery, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.EndpointsProto.SerializeToString, - ), + 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( + servicer.endpoint_discovery, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.EndpointsProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers) + 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDiscoverEndpointsRPC(object): """* jina gRPC service to expose Endpoints from Executors. """ @staticmethod - def endpoint_discovery(request, + def endpoint_discovery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.EndpointsProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaGatewayDryRunRPCStub(object): @@ -363,10 +424,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.dry_run = channel.unary_unary( - '/jina.JinaGatewayDryRunRPC/dry_run', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.StatusProto.FromString, - ) + '/jina.JinaGatewayDryRunRPC/dry_run', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.StatusProto.FromString, + ) class JinaGatewayDryRunRPCServicer(object): @@ -383,39 +444,52 @@ def dry_run(self, request, context): def add_JinaGatewayDryRunRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'dry_run': grpc.unary_unary_rpc_method_handler( - servicer.dry_run, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.StatusProto.SerializeToString, - ), + 'dry_run': grpc.unary_unary_rpc_method_handler( + servicer.dry_run, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.StatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaGatewayDryRunRPC', rpc_method_handlers) + 'jina.JinaGatewayDryRunRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaGatewayDryRunRPC(object): """* jina gRPC service to expose Endpoints from Executors. """ @staticmethod - def dry_run(request, + def dry_run( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaGatewayDryRunRPC/dry_run', + '/jina.JinaGatewayDryRunRPC/dry_run', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.StatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaInfoRPCStub(object): @@ -430,10 +504,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self._status = channel.unary_unary( - '/jina.JinaInfoRPC/_status', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.JinaInfoProto.FromString, - ) + '/jina.JinaInfoRPC/_status', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.JinaInfoProto.FromString, + ) class JinaInfoRPCServicer(object): @@ -450,39 +524,52 @@ def _status(self, request, context): def add_JinaInfoRPCServicer_to_server(servicer, server): rpc_method_handlers = { - '_status': grpc.unary_unary_rpc_method_handler( - servicer._status, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.JinaInfoProto.SerializeToString, - ), + '_status': grpc.unary_unary_rpc_method_handler( + servicer._status, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.JinaInfoProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaInfoRPC', rpc_method_handlers) + 'jina.JinaInfoRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaInfoRPC(object): """* jina gRPC service to expose information about running jina version and environment. """ @staticmethod - def _status(request, + def _status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaInfoRPC/_status', + '/jina.JinaInfoRPC/_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.JinaInfoProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotStub(object): @@ -497,10 +584,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot = channel.unary_unary( - '/jina.JinaExecutorSnapshot/snapshot', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshot/snapshot', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotServicer(object): @@ -517,39 +604,52 @@ def snapshot(self, request, context): def add_JinaExecutorSnapshotServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot': grpc.unary_unary_rpc_method_handler( - servicer.snapshot, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot': grpc.unary_unary_rpc_method_handler( + servicer.snapshot, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshot', rpc_method_handlers) + 'jina.JinaExecutorSnapshot', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshot(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot(request, + def snapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshot/snapshot', + '/jina.JinaExecutorSnapshot/snapshot', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotProgressStub(object): @@ -564,10 +664,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot_status = channel.unary_unary( - '/jina.JinaExecutorSnapshotProgress/snapshot_status', - request_serializer=jina__pb2.SnapshotId.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshotProgress/snapshot_status', + request_serializer=jina__pb2.SnapshotId.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotProgressServicer(object): @@ -584,39 +684,52 @@ def snapshot_status(self, request, context): def add_JinaExecutorSnapshotProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot_status': grpc.unary_unary_rpc_method_handler( - servicer.snapshot_status, - request_deserializer=jina__pb2.SnapshotId.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot_status': grpc.unary_unary_rpc_method_handler( + servicer.snapshot_status, + request_deserializer=jina__pb2.SnapshotId.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers) + 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshotProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot_status(request, + def snapshot_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshotProgress/snapshot_status', + '/jina.JinaExecutorSnapshotProgress/snapshot_status', jina__pb2.SnapshotId.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreStub(object): @@ -631,10 +744,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore = channel.unary_unary( - '/jina.JinaExecutorRestore/restore', - request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestore/restore', + request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreServicer(object): @@ -651,39 +764,52 @@ def restore(self, request, context): def add_JinaExecutorRestoreServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore': grpc.unary_unary_rpc_method_handler( - servicer.restore, - request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore': grpc.unary_unary_rpc_method_handler( + servicer.restore, + request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestore', rpc_method_handlers) + 'jina.JinaExecutorRestore', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestore(object): """* jina gRPC service to trigger a restore at the Executor Runtime. """ @staticmethod - def restore(request, + def restore( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestore/restore', + '/jina.JinaExecutorRestore/restore', jina__pb2.RestoreSnapshotCommand.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreProgressStub(object): @@ -698,10 +824,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore_status = channel.unary_unary( - '/jina.JinaExecutorRestoreProgress/restore_status', - request_serializer=jina__pb2.RestoreId.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestoreProgress/restore_status', + request_serializer=jina__pb2.RestoreId.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreProgressServicer(object): @@ -718,36 +844,49 @@ def restore_status(self, request, context): def add_JinaExecutorRestoreProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore_status': grpc.unary_unary_rpc_method_handler( - servicer.restore_status, - request_deserializer=jina__pb2.RestoreId.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore_status': grpc.unary_unary_rpc_method_handler( + servicer.restore_status, + request_deserializer=jina__pb2.RestoreId.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestoreProgress', rpc_method_handlers) + 'jina.JinaExecutorRestoreProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestoreProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def restore_status(request, + def restore_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestoreProgress/restore_status', + '/jina.JinaExecutorRestoreProgress/restore_status', jina__pb2.RestoreId.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/proto/docarray_v2/pb/jina_pb2.py b/jina/proto/docarray_v2/pb/jina_pb2.py index 5b19af007cedc..37adc470eb8f6 100644 --- a/jina/proto/docarray_v2/pb/jina_pb2.py +++ b/jina/proto/docarray_v2/pb/jina_pb2.py @@ -6,6 +6,7 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,81 +18,83 @@ import docarray.proto.pb.docarray_pb2 as docarray__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 
\x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\x9a\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a]\n\x10\x44\x61taContentProto\x12&\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x16.docarray.DocListProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb4\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12$\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x12.docarray.DocProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 
\x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a .jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 
\x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\x9a\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a]\n\x10\x44\x61taContentProto\x12&\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x16.docarray.DocListProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb4\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12$\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x12.docarray.DocProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a 
.jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'jina_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _JINAINFOPROTO_JINAENTRY._options = None - _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' - _JINAINFOPROTO_ENVSENTRY._options = None - _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' - _ROUTEPROTO._serialized_start=129 - _ROUTEPROTO._serialized_end=288 - _JINAINFOPROTO._serialized_start=291 - _JINAINFOPROTO._serialized_end=486 - _JINAINFOPROTO_JINAENTRY._serialized_start=398 - _JINAINFOPROTO_JINAENTRY._serialized_end=441 - _JINAINFOPROTO_ENVSENTRY._serialized_start=443 - _JINAINFOPROTO_ENVSENTRY._serialized_end=486 - _HEADERPROTO._serialized_start=489 - _HEADERPROTO._serialized_end=687 - _ENDPOINTSPROTO._serialized_start=689 - _ENDPOINTSPROTO._serialized_end=791 - _STATUSPROTO._serialized_start=794 - _STATUSPROTO._serialized_end=1043 - _STATUSPROTO_EXCEPTIONPROTO._serialized_start=927 - _STATUSPROTO_EXCEPTIONPROTO._serialized_end=1005 - _STATUSPROTO_STATUSCODE._serialized_start=1007 - _STATUSPROTO_STATUSCODE._serialized_end=1043 - _RELATEDENTITY._serialized_start=1045 - _RELATEDENTITY._serialized_end=1139 - _DATAREQUESTPROTO._serialized_start=1142 - _DATAREQUESTPROTO._serialized_end=1424 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start=1331 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end=1424 - _SINGLEDOCUMENTREQUESTPROTO._serialized_start=1427 - _SINGLEDOCUMENTREQUESTPROTO._serialized_end=1607 - _DATAREQUESTPROTOWODATA._serialized_start=1610 - _DATAREQUESTPROTOWODATA._serialized_end=1748 - _DATAREQUESTLISTPROTO._serialized_start=1750 - _DATAREQUESTLISTPROTO._serialized_end=1814 - _SNAPSHOTID._serialized_start=1816 - _SNAPSHOTID._serialized_end=1843 - _RESTOREID._serialized_start=1845 - _RESTOREID._serialized_end=1871 - _SNAPSHOTSTATUSPROTO._serialized_start=1874 - _SNAPSHOTSTATUSPROTO._serialized_end=2113 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_start=2000 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_end=2113 - _RESTORESNAPSHOTSTATUSPROTO._serialized_start=2116 - _RESTORESNAPSHOTSTATUSPROTO._serialized_end=2318 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start=2232 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end=2318 - _RESTORESNAPSHOTCOMMAND._serialized_start=2320 - _RESTORESNAPSHOTCOMMAND._serialized_end=2367 - _JINADATAREQUESTRPC._serialized_start=2369 - _JINADATAREQUESTRPC._serialized_end=2459 - 
_JINASINGLEDATAREQUESTRPC._serialized_start=2461 - _JINASINGLEDATAREQUESTRPC._serialized_end=2560 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_start=2562 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_end=2678 - _JINARPC._serialized_start=2680 - _JINARPC._serialized_end=2751 - _JINADISCOVERENDPOINTSRPC._serialized_start=2753 - _JINADISCOVERENDPOINTSRPC._serialized_end=2849 - _JINAGATEWAYDRYRUNRPC._serialized_start=2851 - _JINAGATEWAYDRYRUNRPC._serialized_end=2929 - _JINAINFORPC._serialized_start=2931 - _JINAINFORPC._serialized_end=3002 - _JINAEXECUTORSNAPSHOT._serialized_start=3004 - _JINAEXECUTORSNAPSHOT._serialized_end=3091 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start=3093 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end=3189 - _JINAEXECUTORRESTORE._serialized_start=3191 - _JINAEXECUTORRESTORE._serialized_end=3289 - _JINAEXECUTORRESTOREPROGRESS._serialized_start=3291 - _JINAEXECUTORRESTOREPROGRESS._serialized_end=3391 + DESCRIPTOR._options = None + _JINAINFOPROTO_JINAENTRY._options = None + _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' + _JINAINFOPROTO_ENVSENTRY._options = None + _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' + _ROUTEPROTO._serialized_start = 129 + _ROUTEPROTO._serialized_end = 288 + _JINAINFOPROTO._serialized_start = 291 + _JINAINFOPROTO._serialized_end = 486 + _JINAINFOPROTO_JINAENTRY._serialized_start = 398 + _JINAINFOPROTO_JINAENTRY._serialized_end = 441 + _JINAINFOPROTO_ENVSENTRY._serialized_start = 443 + _JINAINFOPROTO_ENVSENTRY._serialized_end = 486 + _HEADERPROTO._serialized_start = 489 + _HEADERPROTO._serialized_end = 687 + _ENDPOINTSPROTO._serialized_start = 689 + _ENDPOINTSPROTO._serialized_end = 791 + _STATUSPROTO._serialized_start = 794 + _STATUSPROTO._serialized_end = 1043 + _STATUSPROTO_EXCEPTIONPROTO._serialized_start = 927 + _STATUSPROTO_EXCEPTIONPROTO._serialized_end = 1005 + _STATUSPROTO_STATUSCODE._serialized_start = 1007 + _STATUSPROTO_STATUSCODE._serialized_end = 1043 + _RELATEDENTITY._serialized_start = 1045 + _RELATEDENTITY._serialized_end = 1139 + _DATAREQUESTPROTO._serialized_start = 1142 + _DATAREQUESTPROTO._serialized_end = 1424 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start = 1331 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end = 1424 + _SINGLEDOCUMENTREQUESTPROTO._serialized_start = 1427 + _SINGLEDOCUMENTREQUESTPROTO._serialized_end = 1607 + _DATAREQUESTPROTOWODATA._serialized_start = 1610 + _DATAREQUESTPROTOWODATA._serialized_end = 1748 + _DATAREQUESTLISTPROTO._serialized_start = 1750 + _DATAREQUESTLISTPROTO._serialized_end = 1814 + _SNAPSHOTID._serialized_start = 1816 + _SNAPSHOTID._serialized_end = 1843 + _RESTOREID._serialized_start = 1845 + _RESTOREID._serialized_end = 1871 + _SNAPSHOTSTATUSPROTO._serialized_start = 1874 + _SNAPSHOTSTATUSPROTO._serialized_end = 2113 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2000 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2113 + _RESTORESNAPSHOTSTATUSPROTO._serialized_start = 2116 + _RESTORESNAPSHOTSTATUSPROTO._serialized_end = 2318 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2232 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2318 + _RESTORESNAPSHOTCOMMAND._serialized_start = 2320 + _RESTORESNAPSHOTCOMMAND._serialized_end = 2367 + _JINADATAREQUESTRPC._serialized_start = 2369 + _JINADATAREQUESTRPC._serialized_end = 2459 + _JINASINGLEDATAREQUESTRPC._serialized_start = 2461 + _JINASINGLEDATAREQUESTRPC._serialized_end = 2560 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_start = 2562 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_end = 2678 + 
_JINARPC._serialized_start = 2680 + _JINARPC._serialized_end = 2751 + _JINADISCOVERENDPOINTSRPC._serialized_start = 2753 + _JINADISCOVERENDPOINTSRPC._serialized_end = 2849 + _JINAGATEWAYDRYRUNRPC._serialized_start = 2851 + _JINAGATEWAYDRYRUNRPC._serialized_end = 2929 + _JINAINFORPC._serialized_start = 2931 + _JINAINFORPC._serialized_end = 3002 + _JINAEXECUTORSNAPSHOT._serialized_start = 3004 + _JINAEXECUTORSNAPSHOT._serialized_end = 3091 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start = 3093 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end = 3189 + _JINAEXECUTORRESTORE._serialized_start = 3191 + _JINAEXECUTORRESTORE._serialized_end = 3289 + _JINAEXECUTORRESTOREPROGRESS._serialized_start = 3291 + _JINAEXECUTORRESTOREPROGRESS._serialized_end = 3391 # @@protoc_insertion_point(module_scope) diff --git a/jina/proto/docarray_v2/pb/jina_pb2_grpc.py b/jina/proto/docarray_v2/pb/jina_pb2_grpc.py index f52ce19e69412..f571beae83675 100644 --- a/jina/proto/docarray_v2/pb/jina_pb2_grpc.py +++ b/jina/proto/docarray_v2/pb/jina_pb2_grpc.py @@ -18,10 +18,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.process_data = channel.unary_unary( - '/jina.JinaDataRequestRPC/process_data', - request_serializer=jina__pb2.DataRequestListProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaDataRequestRPC/process_data', + request_serializer=jina__pb2.DataRequestListProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaDataRequestRPCServicer(object): @@ -30,8 +30,7 @@ class JinaDataRequestRPCServicer(object): """ def process_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -39,39 +38,52 @@ def process_data(self, request, context): def add_JinaDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_data': grpc.unary_unary_rpc_method_handler( - servicer.process_data, - request_deserializer=jina__pb2.DataRequestListProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_data': grpc.unary_unary_rpc_method_handler( + servicer.process_data, + request_deserializer=jina__pb2.DataRequestListProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDataRequestRPC', rpc_method_handlers) + 'jina.JinaDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDataRequestRPC(object): """* jina gRPC service for DataRequests. 
""" @staticmethod - def process_data(request, + def process_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDataRequestRPC/process_data', + '/jina.JinaDataRequestRPC/process_data', jina__pb2.DataRequestListProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDataRequestRPCStub(object): @@ -87,10 +99,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.process_single_data = channel.unary_unary( - '/jina.JinaSingleDataRequestRPC/process_single_data', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaSingleDataRequestRPC/process_single_data', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaSingleDataRequestRPCServicer(object): @@ -100,8 +112,7 @@ class JinaSingleDataRequestRPCServicer(object): """ def process_single_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -109,18 +120,19 @@ def process_single_data(self, request, context): def add_JinaSingleDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_single_data': grpc.unary_unary_rpc_method_handler( - servicer.process_single_data, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_single_data': grpc.unary_unary_rpc_method_handler( + servicer.process_single_data, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDataRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDataRequestRPC(object): """* jina gRPC service for DataRequests. 
@@ -128,21 +140,33 @@ class JinaSingleDataRequestRPC(object): """ @staticmethod - def process_single_data(request, + def process_single_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaSingleDataRequestRPC/process_single_data', + '/jina.JinaSingleDataRequestRPC/process_single_data', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDocumentRequestRPCStub(object): @@ -158,10 +182,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.stream_doc = channel.unary_stream( - '/jina.JinaSingleDocumentRequestRPC/stream_doc', - request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - ) + '/jina.JinaSingleDocumentRequestRPC/stream_doc', + request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + ) class JinaSingleDocumentRequestRPCServicer(object): @@ -171,8 +195,7 @@ class JinaSingleDocumentRequestRPCServicer(object): """ def stream_doc(self, request, context): - """Used for streaming one document to the Executors - """ + """Used for streaming one document to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -180,18 +203,19 @@ def stream_doc(self, request, context): def add_JinaSingleDocumentRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'stream_doc': grpc.unary_stream_rpc_method_handler( - servicer.stream_doc, - request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - ), + 'stream_doc': grpc.unary_stream_rpc_method_handler( + servicer.stream_doc, + request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDocumentRequestRPC(object): """* jina gRPC service for DataRequests. 
@@ -199,21 +223,33 @@ class JinaSingleDocumentRequestRPC(object): """ @staticmethod - def stream_doc(request, + def stream_doc( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream(request, target, '/jina.JinaSingleDocumentRequestRPC/stream_doc', + '/jina.JinaSingleDocumentRequestRPC/stream_doc', jina__pb2.SingleDocumentRequestProto.SerializeToString, jina__pb2.SingleDocumentRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaRPCStub(object): @@ -228,10 +264,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.Call = channel.stream_stream( - '/jina.JinaRPC/Call', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaRPC/Call', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaRPCServicer(object): @@ -240,8 +276,7 @@ class JinaRPCServicer(object): """ def Call(self, request_iterator, context): - """Pass in a Request and a filled Request with matches will be returned. - """ + """Pass in a Request and a filled Request with matches will be returned.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -249,39 +284,52 @@ def Call(self, request_iterator, context): def add_JinaRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'Call': grpc.stream_stream_rpc_method_handler( - servicer.Call, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'Call': grpc.stream_stream_rpc_method_handler( + servicer.Call, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaRPC', rpc_method_handlers) + 'jina.JinaRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaRPC(object): """* jina streaming gRPC service. 
""" @staticmethod - def Call(request_iterator, + def Call( + request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.stream_stream( + request_iterator, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/jina.JinaRPC/Call', + '/jina.JinaRPC/Call', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaDiscoverEndpointsRPCStub(object): @@ -296,10 +344,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.endpoint_discovery = channel.unary_unary( - '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.EndpointsProto.FromString, - ) + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.EndpointsProto.FromString, + ) class JinaDiscoverEndpointsRPCServicer(object): @@ -316,39 +364,52 @@ def endpoint_discovery(self, request, context): def add_JinaDiscoverEndpointsRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( - servicer.endpoint_discovery, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.EndpointsProto.SerializeToString, - ), + 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( + servicer.endpoint_discovery, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.EndpointsProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers) + 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDiscoverEndpointsRPC(object): """* jina gRPC service to expose Endpoints from Executors. 
""" @staticmethod - def endpoint_discovery(request, + def endpoint_discovery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.EndpointsProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaGatewayDryRunRPCStub(object): @@ -363,10 +424,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.dry_run = channel.unary_unary( - '/jina.JinaGatewayDryRunRPC/dry_run', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.StatusProto.FromString, - ) + '/jina.JinaGatewayDryRunRPC/dry_run', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.StatusProto.FromString, + ) class JinaGatewayDryRunRPCServicer(object): @@ -383,39 +444,52 @@ def dry_run(self, request, context): def add_JinaGatewayDryRunRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'dry_run': grpc.unary_unary_rpc_method_handler( - servicer.dry_run, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.StatusProto.SerializeToString, - ), + 'dry_run': grpc.unary_unary_rpc_method_handler( + servicer.dry_run, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.StatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaGatewayDryRunRPC', rpc_method_handlers) + 'jina.JinaGatewayDryRunRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaGatewayDryRunRPC(object): """* jina gRPC service to expose Endpoints from Executors. 
""" @staticmethod - def dry_run(request, + def dry_run( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaGatewayDryRunRPC/dry_run', + '/jina.JinaGatewayDryRunRPC/dry_run', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.StatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaInfoRPCStub(object): @@ -430,10 +504,10 @@ def __init__(self, channel): channel: A grpc.Channel. """ self._status = channel.unary_unary( - '/jina.JinaInfoRPC/_status', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.JinaInfoProto.FromString, - ) + '/jina.JinaInfoRPC/_status', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.JinaInfoProto.FromString, + ) class JinaInfoRPCServicer(object): @@ -450,39 +524,52 @@ def _status(self, request, context): def add_JinaInfoRPCServicer_to_server(servicer, server): rpc_method_handlers = { - '_status': grpc.unary_unary_rpc_method_handler( - servicer._status, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.JinaInfoProto.SerializeToString, - ), + '_status': grpc.unary_unary_rpc_method_handler( + servicer._status, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.JinaInfoProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaInfoRPC', rpc_method_handlers) + 'jina.JinaInfoRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaInfoRPC(object): """* jina gRPC service to expose information about running jina version and environment. """ @staticmethod - def _status(request, + def _status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaInfoRPC/_status', + '/jina.JinaInfoRPC/_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.JinaInfoProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotStub(object): @@ -497,10 +584,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot = channel.unary_unary( - '/jina.JinaExecutorSnapshot/snapshot', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshot/snapshot', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotServicer(object): @@ -517,39 +604,52 @@ def snapshot(self, request, context): def add_JinaExecutorSnapshotServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot': grpc.unary_unary_rpc_method_handler( - servicer.snapshot, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot': grpc.unary_unary_rpc_method_handler( + servicer.snapshot, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshot', rpc_method_handlers) + 'jina.JinaExecutorSnapshot', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshot(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot(request, + def snapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshot/snapshot', + '/jina.JinaExecutorSnapshot/snapshot', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotProgressStub(object): @@ -564,10 +664,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot_status = channel.unary_unary( - '/jina.JinaExecutorSnapshotProgress/snapshot_status', - request_serializer=jina__pb2.SnapshotId.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshotProgress/snapshot_status', + request_serializer=jina__pb2.SnapshotId.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotProgressServicer(object): @@ -584,39 +684,52 @@ def snapshot_status(self, request, context): def add_JinaExecutorSnapshotProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot_status': grpc.unary_unary_rpc_method_handler( - servicer.snapshot_status, - request_deserializer=jina__pb2.SnapshotId.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot_status': grpc.unary_unary_rpc_method_handler( + servicer.snapshot_status, + request_deserializer=jina__pb2.SnapshotId.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers) + 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshotProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot_status(request, + def snapshot_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshotProgress/snapshot_status', + '/jina.JinaExecutorSnapshotProgress/snapshot_status', jina__pb2.SnapshotId.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreStub(object): @@ -631,10 +744,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore = channel.unary_unary( - '/jina.JinaExecutorRestore/restore', - request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestore/restore', + request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreServicer(object): @@ -651,39 +764,52 @@ def restore(self, request, context): def add_JinaExecutorRestoreServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore': grpc.unary_unary_rpc_method_handler( - servicer.restore, - request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore': grpc.unary_unary_rpc_method_handler( + servicer.restore, + request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestore', rpc_method_handlers) + 'jina.JinaExecutorRestore', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestore(object): """* jina gRPC service to trigger a restore at the Executor Runtime. """ @staticmethod - def restore(request, + def restore( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestore/restore', + '/jina.JinaExecutorRestore/restore', jina__pb2.RestoreSnapshotCommand.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreProgressStub(object): @@ -698,10 +824,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore_status = channel.unary_unary( - '/jina.JinaExecutorRestoreProgress/restore_status', - request_serializer=jina__pb2.RestoreId.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestoreProgress/restore_status', + request_serializer=jina__pb2.RestoreId.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreProgressServicer(object): @@ -718,36 +844,49 @@ def restore_status(self, request, context): def add_JinaExecutorRestoreProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore_status': grpc.unary_unary_rpc_method_handler( - servicer.restore_status, - request_deserializer=jina__pb2.RestoreId.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore_status': grpc.unary_unary_rpc_method_handler( + servicer.restore_status, + request_deserializer=jina__pb2.RestoreId.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestoreProgress', rpc_method_handlers) + 'jina.JinaExecutorRestoreProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestoreProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def restore_status(request, + def restore_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestoreProgress/restore_status', + '/jina.JinaExecutorRestoreProgress/restore_status', jina__pb2.RestoreId.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/proto/docarray_v2/pb2/jina_pb2.py b/jina/proto/docarray_v2/pb2/jina_pb2.py index 828b0ed533d91..b1dc9b7c11667 100644 --- a/jina/proto/docarray_v2/pb2/jina_pb2.py +++ b/jina/proto/docarray_v2/pb2/jina_pb2.py @@ -7,6 +7,7 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,8 +19,9 @@ import docarray.proto.pb2.docarray_pb2 as docarray__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 
\x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\x9a\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a]\n\x10\x44\x61taContentProto\x12&\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x16.docarray.DocListProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb4\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12$\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x12.docarray.DocProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 
\x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a .jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3') - +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"f\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\x12\x17\n\x0fwrite_endpoints\x18\x02 \x03(\t\x12(\n\x07schemas\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 
\x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\x9a\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a]\n\x10\x44\x61taContentProto\x12&\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x16.docarray.DocListProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"\xb4\x01\n\x1aSingleDocumentRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12$\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x12.docarray.DocProto\"\x8a\x01\n\x16\x44\x61taRequestProtoWoData\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto\"\x1b\n\nSnapshotId\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tRestoreId\x12\r\n\x05value\x18\x01 \x01(\t\"\xef\x01\n\x13SnapshotStatusProto\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.jina.SnapshotId\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .jina.SnapshotStatusProto.Status\x12\x15\n\rsnapshot_file\x18\x03 \x01(\t\"q\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\r\n\tNOT_FOUND\x10\x06\"\xca\x01\n\x1aRestoreSnapshotStatusProto\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.jina.RestoreId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.jina.RestoreSnapshotStatusProto.Status\"V\n\x06Status\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tNOT_FOUND\x10\x06\"/\n\x16RestoreSnapshotCommand\x12\x15\n\rsnapshot_file\x18\x01 \x01(\t2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32t\n\x1cJinaSingleDocumentRequestRPC\x12T\n\nstream_doc\x12 .jina.SingleDocumentRequestProto\x1a 
.jina.SingleDocumentRequestProto\"\x00\x30\x01\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x32W\n\x14JinaExecutorSnapshot\x12?\n\x08snapshot\x12\x16.google.protobuf.Empty\x1a\x19.jina.SnapshotStatusProto\"\x00\x32`\n\x1cJinaExecutorSnapshotProgress\x12@\n\x0fsnapshot_status\x12\x10.jina.SnapshotId\x1a\x19.jina.SnapshotStatusProto\"\x00\x32\x62\n\x13JinaExecutorRestore\x12K\n\x07restore\x12\x1c.jina.RestoreSnapshotCommand\x1a .jina.RestoreSnapshotStatusProto\"\x00\x32\x64\n\x1bJinaExecutorRestoreProgress\x12\x45\n\x0erestore_status\x12\x0f.jina.RestoreId\x1a .jina.RestoreSnapshotStatusProto\"\x00\x62\x06proto3' +) _ROUTEPROTO = DESCRIPTOR.message_types_by_name['RouteProto'] @@ -32,237 +34,319 @@ _STATUSPROTO_EXCEPTIONPROTO = _STATUSPROTO.nested_types_by_name['ExceptionProto'] _RELATEDENTITY = DESCRIPTOR.message_types_by_name['RelatedEntity'] _DATAREQUESTPROTO = DESCRIPTOR.message_types_by_name['DataRequestProto'] -_DATAREQUESTPROTO_DATACONTENTPROTO = _DATAREQUESTPROTO.nested_types_by_name['DataContentProto'] -_SINGLEDOCUMENTREQUESTPROTO = DESCRIPTOR.message_types_by_name['SingleDocumentRequestProto'] +_DATAREQUESTPROTO_DATACONTENTPROTO = _DATAREQUESTPROTO.nested_types_by_name[ + 'DataContentProto' +] +_SINGLEDOCUMENTREQUESTPROTO = DESCRIPTOR.message_types_by_name[ + 'SingleDocumentRequestProto' +] _DATAREQUESTPROTOWODATA = DESCRIPTOR.message_types_by_name['DataRequestProtoWoData'] _DATAREQUESTLISTPROTO = DESCRIPTOR.message_types_by_name['DataRequestListProto'] _SNAPSHOTID = DESCRIPTOR.message_types_by_name['SnapshotId'] _RESTOREID = DESCRIPTOR.message_types_by_name['RestoreId'] _SNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name['SnapshotStatusProto'] -_RESTORESNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name['RestoreSnapshotStatusProto'] +_RESTORESNAPSHOTSTATUSPROTO = DESCRIPTOR.message_types_by_name[ + 'RestoreSnapshotStatusProto' +] _RESTORESNAPSHOTCOMMAND = DESCRIPTOR.message_types_by_name['RestoreSnapshotCommand'] _STATUSPROTO_STATUSCODE = _STATUSPROTO.enum_types_by_name['StatusCode'] _SNAPSHOTSTATUSPROTO_STATUS = _SNAPSHOTSTATUSPROTO.enum_types_by_name['Status'] -_RESTORESNAPSHOTSTATUSPROTO_STATUS = _RESTORESNAPSHOTSTATUSPROTO.enum_types_by_name['Status'] -RouteProto = _reflection.GeneratedProtocolMessageType('RouteProto', (_message.Message,), { - 'DESCRIPTOR' : _ROUTEPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RouteProto) - }) +_RESTORESNAPSHOTSTATUSPROTO_STATUS = _RESTORESNAPSHOTSTATUSPROTO.enum_types_by_name[ + 'Status' +] +RouteProto = _reflection.GeneratedProtocolMessageType( + 'RouteProto', + (_message.Message,), + { + 'DESCRIPTOR': _ROUTEPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RouteProto) + }, +) _sym_db.RegisterMessage(RouteProto) -JinaInfoProto = _reflection.GeneratedProtocolMessageType('JinaInfoProto', (_message.Message,), { - - 'JinaEntry' : _reflection.GeneratedProtocolMessageType('JinaEntry', (_message.Message,), { - 'DESCRIPTOR' : _JINAINFOPROTO_JINAENTRY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.JinaEntry) - }) - , - 
- 'EnvsEntry' : _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), { - 'DESCRIPTOR' : _JINAINFOPROTO_ENVSENTRY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.EnvsEntry) - }) - , - 'DESCRIPTOR' : _JINAINFOPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.JinaInfoProto) - }) +JinaInfoProto = _reflection.GeneratedProtocolMessageType( + 'JinaInfoProto', + (_message.Message,), + { + 'JinaEntry': _reflection.GeneratedProtocolMessageType( + 'JinaEntry', + (_message.Message,), + { + 'DESCRIPTOR': _JINAINFOPROTO_JINAENTRY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.JinaEntry) + }, + ), + 'EnvsEntry': _reflection.GeneratedProtocolMessageType( + 'EnvsEntry', + (_message.Message,), + { + 'DESCRIPTOR': _JINAINFOPROTO_ENVSENTRY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto.EnvsEntry) + }, + ), + 'DESCRIPTOR': _JINAINFOPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.JinaInfoProto) + }, +) _sym_db.RegisterMessage(JinaInfoProto) _sym_db.RegisterMessage(JinaInfoProto.JinaEntry) _sym_db.RegisterMessage(JinaInfoProto.EnvsEntry) -HeaderProto = _reflection.GeneratedProtocolMessageType('HeaderProto', (_message.Message,), { - 'DESCRIPTOR' : _HEADERPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.HeaderProto) - }) +HeaderProto = _reflection.GeneratedProtocolMessageType( + 'HeaderProto', + (_message.Message,), + { + 'DESCRIPTOR': _HEADERPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.HeaderProto) + }, +) _sym_db.RegisterMessage(HeaderProto) -EndpointsProto = _reflection.GeneratedProtocolMessageType('EndpointsProto', (_message.Message,), { - 'DESCRIPTOR' : _ENDPOINTSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.EndpointsProto) - }) +EndpointsProto = _reflection.GeneratedProtocolMessageType( + 'EndpointsProto', + (_message.Message,), + { + 'DESCRIPTOR': _ENDPOINTSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.EndpointsProto) + }, +) _sym_db.RegisterMessage(EndpointsProto) -StatusProto = _reflection.GeneratedProtocolMessageType('StatusProto', (_message.Message,), { - - 'ExceptionProto' : _reflection.GeneratedProtocolMessageType('ExceptionProto', (_message.Message,), { - 'DESCRIPTOR' : _STATUSPROTO_EXCEPTIONPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.StatusProto.ExceptionProto) - }) - , - 'DESCRIPTOR' : _STATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.StatusProto) - }) +StatusProto = _reflection.GeneratedProtocolMessageType( + 'StatusProto', + (_message.Message,), + { + 'ExceptionProto': _reflection.GeneratedProtocolMessageType( + 'ExceptionProto', + (_message.Message,), + { + 'DESCRIPTOR': _STATUSPROTO_EXCEPTIONPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.StatusProto.ExceptionProto) + }, + ), + 'DESCRIPTOR': _STATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.StatusProto) + }, +) _sym_db.RegisterMessage(StatusProto) _sym_db.RegisterMessage(StatusProto.ExceptionProto) -RelatedEntity = _reflection.GeneratedProtocolMessageType('RelatedEntity', (_message.Message,), { - 'DESCRIPTOR' : _RELATEDENTITY, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RelatedEntity) - }) +RelatedEntity = 
_reflection.GeneratedProtocolMessageType( + 'RelatedEntity', + (_message.Message,), + { + 'DESCRIPTOR': _RELATEDENTITY, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RelatedEntity) + }, +) _sym_db.RegisterMessage(RelatedEntity) -DataRequestProto = _reflection.GeneratedProtocolMessageType('DataRequestProto', (_message.Message,), { - - 'DataContentProto' : _reflection.GeneratedProtocolMessageType('DataContentProto', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTPROTO_DATACONTENTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProto.DataContentProto) - }) - , - 'DESCRIPTOR' : _DATAREQUESTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProto) - }) +DataRequestProto = _reflection.GeneratedProtocolMessageType( + 'DataRequestProto', + (_message.Message,), + { + 'DataContentProto': _reflection.GeneratedProtocolMessageType( + 'DataContentProto', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTPROTO_DATACONTENTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProto.DataContentProto) + }, + ), + 'DESCRIPTOR': _DATAREQUESTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProto) + }, +) _sym_db.RegisterMessage(DataRequestProto) _sym_db.RegisterMessage(DataRequestProto.DataContentProto) -SingleDocumentRequestProto = _reflection.GeneratedProtocolMessageType('SingleDocumentRequestProto', (_message.Message,), { - 'DESCRIPTOR' : _SINGLEDOCUMENTREQUESTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SingleDocumentRequestProto) - }) +SingleDocumentRequestProto = _reflection.GeneratedProtocolMessageType( + 'SingleDocumentRequestProto', + (_message.Message,), + { + 'DESCRIPTOR': _SINGLEDOCUMENTREQUESTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.SingleDocumentRequestProto) + }, +) _sym_db.RegisterMessage(SingleDocumentRequestProto) -DataRequestProtoWoData = _reflection.GeneratedProtocolMessageType('DataRequestProtoWoData', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTPROTOWODATA, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestProtoWoData) - }) +DataRequestProtoWoData = _reflection.GeneratedProtocolMessageType( + 'DataRequestProtoWoData', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTPROTOWODATA, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestProtoWoData) + }, +) _sym_db.RegisterMessage(DataRequestProtoWoData) -DataRequestListProto = _reflection.GeneratedProtocolMessageType('DataRequestListProto', (_message.Message,), { - 'DESCRIPTOR' : _DATAREQUESTLISTPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.DataRequestListProto) - }) +DataRequestListProto = _reflection.GeneratedProtocolMessageType( + 'DataRequestListProto', + (_message.Message,), + { + 'DESCRIPTOR': _DATAREQUESTLISTPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.DataRequestListProto) + }, +) _sym_db.RegisterMessage(DataRequestListProto) -SnapshotId = _reflection.GeneratedProtocolMessageType('SnapshotId', (_message.Message,), { - 'DESCRIPTOR' : _SNAPSHOTID, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SnapshotId) - }) +SnapshotId = _reflection.GeneratedProtocolMessageType( + 'SnapshotId', + (_message.Message,), + { + 'DESCRIPTOR': _SNAPSHOTID, + '__module__': 'jina_pb2', + # 
@@protoc_insertion_point(class_scope:jina.SnapshotId) + }, +) _sym_db.RegisterMessage(SnapshotId) -RestoreId = _reflection.GeneratedProtocolMessageType('RestoreId', (_message.Message,), { - 'DESCRIPTOR' : _RESTOREID, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreId) - }) +RestoreId = _reflection.GeneratedProtocolMessageType( + 'RestoreId', + (_message.Message,), + { + 'DESCRIPTOR': _RESTOREID, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreId) + }, +) _sym_db.RegisterMessage(RestoreId) -SnapshotStatusProto = _reflection.GeneratedProtocolMessageType('SnapshotStatusProto', (_message.Message,), { - 'DESCRIPTOR' : _SNAPSHOTSTATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.SnapshotStatusProto) - }) +SnapshotStatusProto = _reflection.GeneratedProtocolMessageType( + 'SnapshotStatusProto', + (_message.Message,), + { + 'DESCRIPTOR': _SNAPSHOTSTATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.SnapshotStatusProto) + }, +) _sym_db.RegisterMessage(SnapshotStatusProto) -RestoreSnapshotStatusProto = _reflection.GeneratedProtocolMessageType('RestoreSnapshotStatusProto', (_message.Message,), { - 'DESCRIPTOR' : _RESTORESNAPSHOTSTATUSPROTO, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotStatusProto) - }) +RestoreSnapshotStatusProto = _reflection.GeneratedProtocolMessageType( + 'RestoreSnapshotStatusProto', + (_message.Message,), + { + 'DESCRIPTOR': _RESTORESNAPSHOTSTATUSPROTO, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotStatusProto) + }, +) _sym_db.RegisterMessage(RestoreSnapshotStatusProto) -RestoreSnapshotCommand = _reflection.GeneratedProtocolMessageType('RestoreSnapshotCommand', (_message.Message,), { - 'DESCRIPTOR' : _RESTORESNAPSHOTCOMMAND, - '__module__' : 'jina_pb2' - # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotCommand) - }) +RestoreSnapshotCommand = _reflection.GeneratedProtocolMessageType( + 'RestoreSnapshotCommand', + (_message.Message,), + { + 'DESCRIPTOR': _RESTORESNAPSHOTCOMMAND, + '__module__': 'jina_pb2', + # @@protoc_insertion_point(class_scope:jina.RestoreSnapshotCommand) + }, +) _sym_db.RegisterMessage(RestoreSnapshotCommand) _JINADATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaDataRequestRPC'] _JINASINGLEDATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC'] -_JINASINGLEDOCUMENTREQUESTRPC = DESCRIPTOR.services_by_name['JinaSingleDocumentRequestRPC'] +_JINASINGLEDOCUMENTREQUESTRPC = DESCRIPTOR.services_by_name[ + 'JinaSingleDocumentRequestRPC' +] _JINARPC = DESCRIPTOR.services_by_name['JinaRPC'] _JINADISCOVERENDPOINTSRPC = DESCRIPTOR.services_by_name['JinaDiscoverEndpointsRPC'] _JINAGATEWAYDRYRUNRPC = DESCRIPTOR.services_by_name['JinaGatewayDryRunRPC'] _JINAINFORPC = DESCRIPTOR.services_by_name['JinaInfoRPC'] _JINAEXECUTORSNAPSHOT = DESCRIPTOR.services_by_name['JinaExecutorSnapshot'] -_JINAEXECUTORSNAPSHOTPROGRESS = DESCRIPTOR.services_by_name['JinaExecutorSnapshotProgress'] +_JINAEXECUTORSNAPSHOTPROGRESS = DESCRIPTOR.services_by_name[ + 'JinaExecutorSnapshotProgress' +] _JINAEXECUTORRESTORE = DESCRIPTOR.services_by_name['JinaExecutorRestore'] -_JINAEXECUTORRESTOREPROGRESS = DESCRIPTOR.services_by_name['JinaExecutorRestoreProgress'] +_JINAEXECUTORRESTOREPROGRESS = DESCRIPTOR.services_by_name[ + 'JinaExecutorRestoreProgress' +] if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - 
_JINAINFOPROTO_JINAENTRY._options = None - _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' - _JINAINFOPROTO_ENVSENTRY._options = None - _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' - _ROUTEPROTO._serialized_start=129 - _ROUTEPROTO._serialized_end=288 - _JINAINFOPROTO._serialized_start=291 - _JINAINFOPROTO._serialized_end=486 - _JINAINFOPROTO_JINAENTRY._serialized_start=398 - _JINAINFOPROTO_JINAENTRY._serialized_end=441 - _JINAINFOPROTO_ENVSENTRY._serialized_start=443 - _JINAINFOPROTO_ENVSENTRY._serialized_end=486 - _HEADERPROTO._serialized_start=489 - _HEADERPROTO._serialized_end=687 - _ENDPOINTSPROTO._serialized_start=689 - _ENDPOINTSPROTO._serialized_end=791 - _STATUSPROTO._serialized_start=794 - _STATUSPROTO._serialized_end=1043 - _STATUSPROTO_EXCEPTIONPROTO._serialized_start=927 - _STATUSPROTO_EXCEPTIONPROTO._serialized_end=1005 - _STATUSPROTO_STATUSCODE._serialized_start=1007 - _STATUSPROTO_STATUSCODE._serialized_end=1043 - _RELATEDENTITY._serialized_start=1045 - _RELATEDENTITY._serialized_end=1139 - _DATAREQUESTPROTO._serialized_start=1142 - _DATAREQUESTPROTO._serialized_end=1424 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start=1331 - _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end=1424 - _SINGLEDOCUMENTREQUESTPROTO._serialized_start=1427 - _SINGLEDOCUMENTREQUESTPROTO._serialized_end=1607 - _DATAREQUESTPROTOWODATA._serialized_start=1610 - _DATAREQUESTPROTOWODATA._serialized_end=1748 - _DATAREQUESTLISTPROTO._serialized_start=1750 - _DATAREQUESTLISTPROTO._serialized_end=1814 - _SNAPSHOTID._serialized_start=1816 - _SNAPSHOTID._serialized_end=1843 - _RESTOREID._serialized_start=1845 - _RESTOREID._serialized_end=1871 - _SNAPSHOTSTATUSPROTO._serialized_start=1874 - _SNAPSHOTSTATUSPROTO._serialized_end=2113 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_start=2000 - _SNAPSHOTSTATUSPROTO_STATUS._serialized_end=2113 - _RESTORESNAPSHOTSTATUSPROTO._serialized_start=2116 - _RESTORESNAPSHOTSTATUSPROTO._serialized_end=2318 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start=2232 - _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end=2318 - _RESTORESNAPSHOTCOMMAND._serialized_start=2320 - _RESTORESNAPSHOTCOMMAND._serialized_end=2367 - _JINADATAREQUESTRPC._serialized_start=2369 - _JINADATAREQUESTRPC._serialized_end=2459 - _JINASINGLEDATAREQUESTRPC._serialized_start=2461 - _JINASINGLEDATAREQUESTRPC._serialized_end=2560 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_start=2562 - _JINASINGLEDOCUMENTREQUESTRPC._serialized_end=2678 - _JINARPC._serialized_start=2680 - _JINARPC._serialized_end=2751 - _JINADISCOVERENDPOINTSRPC._serialized_start=2753 - _JINADISCOVERENDPOINTSRPC._serialized_end=2849 - _JINAGATEWAYDRYRUNRPC._serialized_start=2851 - _JINAGATEWAYDRYRUNRPC._serialized_end=2929 - _JINAINFORPC._serialized_start=2931 - _JINAINFORPC._serialized_end=3002 - _JINAEXECUTORSNAPSHOT._serialized_start=3004 - _JINAEXECUTORSNAPSHOT._serialized_end=3091 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start=3093 - _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end=3189 - _JINAEXECUTORRESTORE._serialized_start=3191 - _JINAEXECUTORRESTORE._serialized_end=3289 - _JINAEXECUTORRESTOREPROGRESS._serialized_start=3291 - _JINAEXECUTORRESTOREPROGRESS._serialized_end=3391 + DESCRIPTOR._options = None + _JINAINFOPROTO_JINAENTRY._options = None + _JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001' + _JINAINFOPROTO_ENVSENTRY._options = None + _JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001' + _ROUTEPROTO._serialized_start = 129 + _ROUTEPROTO._serialized_end = 288 + _JINAINFOPROTO._serialized_start = 
291 + _JINAINFOPROTO._serialized_end = 486 + _JINAINFOPROTO_JINAENTRY._serialized_start = 398 + _JINAINFOPROTO_JINAENTRY._serialized_end = 441 + _JINAINFOPROTO_ENVSENTRY._serialized_start = 443 + _JINAINFOPROTO_ENVSENTRY._serialized_end = 486 + _HEADERPROTO._serialized_start = 489 + _HEADERPROTO._serialized_end = 687 + _ENDPOINTSPROTO._serialized_start = 689 + _ENDPOINTSPROTO._serialized_end = 791 + _STATUSPROTO._serialized_start = 794 + _STATUSPROTO._serialized_end = 1043 + _STATUSPROTO_EXCEPTIONPROTO._serialized_start = 927 + _STATUSPROTO_EXCEPTIONPROTO._serialized_end = 1005 + _STATUSPROTO_STATUSCODE._serialized_start = 1007 + _STATUSPROTO_STATUSCODE._serialized_end = 1043 + _RELATEDENTITY._serialized_start = 1045 + _RELATEDENTITY._serialized_end = 1139 + _DATAREQUESTPROTO._serialized_start = 1142 + _DATAREQUESTPROTO._serialized_end = 1424 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start = 1331 + _DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end = 1424 + _SINGLEDOCUMENTREQUESTPROTO._serialized_start = 1427 + _SINGLEDOCUMENTREQUESTPROTO._serialized_end = 1607 + _DATAREQUESTPROTOWODATA._serialized_start = 1610 + _DATAREQUESTPROTOWODATA._serialized_end = 1748 + _DATAREQUESTLISTPROTO._serialized_start = 1750 + _DATAREQUESTLISTPROTO._serialized_end = 1814 + _SNAPSHOTID._serialized_start = 1816 + _SNAPSHOTID._serialized_end = 1843 + _RESTOREID._serialized_start = 1845 + _RESTOREID._serialized_end = 1871 + _SNAPSHOTSTATUSPROTO._serialized_start = 1874 + _SNAPSHOTSTATUSPROTO._serialized_end = 2113 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2000 + _SNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2113 + _RESTORESNAPSHOTSTATUSPROTO._serialized_start = 2116 + _RESTORESNAPSHOTSTATUSPROTO._serialized_end = 2318 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_start = 2232 + _RESTORESNAPSHOTSTATUSPROTO_STATUS._serialized_end = 2318 + _RESTORESNAPSHOTCOMMAND._serialized_start = 2320 + _RESTORESNAPSHOTCOMMAND._serialized_end = 2367 + _JINADATAREQUESTRPC._serialized_start = 2369 + _JINADATAREQUESTRPC._serialized_end = 2459 + _JINASINGLEDATAREQUESTRPC._serialized_start = 2461 + _JINASINGLEDATAREQUESTRPC._serialized_end = 2560 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_start = 2562 + _JINASINGLEDOCUMENTREQUESTRPC._serialized_end = 2678 + _JINARPC._serialized_start = 2680 + _JINARPC._serialized_end = 2751 + _JINADISCOVERENDPOINTSRPC._serialized_start = 2753 + _JINADISCOVERENDPOINTSRPC._serialized_end = 2849 + _JINAGATEWAYDRYRUNRPC._serialized_start = 2851 + _JINAGATEWAYDRYRUNRPC._serialized_end = 2929 + _JINAINFORPC._serialized_start = 2931 + _JINAINFORPC._serialized_end = 3002 + _JINAEXECUTORSNAPSHOT._serialized_start = 3004 + _JINAEXECUTORSNAPSHOT._serialized_end = 3091 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_start = 3093 + _JINAEXECUTORSNAPSHOTPROGRESS._serialized_end = 3189 + _JINAEXECUTORRESTORE._serialized_start = 3191 + _JINAEXECUTORRESTORE._serialized_end = 3289 + _JINAEXECUTORRESTOREPROGRESS._serialized_start = 3291 + _JINAEXECUTORRESTOREPROGRESS._serialized_end = 3391 # @@protoc_insertion_point(module_scope) diff --git a/jina/proto/docarray_v2/pb2/jina_pb2_grpc.py b/jina/proto/docarray_v2/pb2/jina_pb2_grpc.py index f52ce19e69412..f571beae83675 100644 --- a/jina/proto/docarray_v2/pb2/jina_pb2_grpc.py +++ b/jina/proto/docarray_v2/pb2/jina_pb2_grpc.py @@ -18,10 +18,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.process_data = channel.unary_unary( - '/jina.JinaDataRequestRPC/process_data', - request_serializer=jina__pb2.DataRequestListProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaDataRequestRPC/process_data', + request_serializer=jina__pb2.DataRequestListProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaDataRequestRPCServicer(object): @@ -30,8 +30,7 @@ class JinaDataRequestRPCServicer(object): """ def process_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -39,39 +38,52 @@ def process_data(self, request, context): def add_JinaDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_data': grpc.unary_unary_rpc_method_handler( - servicer.process_data, - request_deserializer=jina__pb2.DataRequestListProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_data': grpc.unary_unary_rpc_method_handler( + servicer.process_data, + request_deserializer=jina__pb2.DataRequestListProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDataRequestRPC', rpc_method_handlers) + 'jina.JinaDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDataRequestRPC(object): """* jina gRPC service for DataRequests. """ @staticmethod - def process_data(request, + def process_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDataRequestRPC/process_data', + '/jina.JinaDataRequestRPC/process_data', jina__pb2.DataRequestListProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDataRequestRPCStub(object): @@ -87,10 +99,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.process_single_data = channel.unary_unary( - '/jina.JinaSingleDataRequestRPC/process_single_data', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaSingleDataRequestRPC/process_single_data', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaSingleDataRequestRPCServicer(object): @@ -100,8 +112,7 @@ class JinaSingleDataRequestRPCServicer(object): """ def process_single_data(self, request, context): - """Used for passing DataRequests to the Executors - """ + """Used for passing DataRequests to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -109,18 +120,19 @@ def process_single_data(self, request, context): def add_JinaSingleDataRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'process_single_data': grpc.unary_unary_rpc_method_handler( - servicer.process_single_data, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'process_single_data': grpc.unary_unary_rpc_method_handler( + servicer.process_single_data, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDataRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDataRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDataRequestRPC(object): """* jina gRPC service for DataRequests. @@ -128,21 +140,33 @@ class JinaSingleDataRequestRPC(object): """ @staticmethod - def process_single_data(request, + def process_single_data( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaSingleDataRequestRPC/process_single_data', + '/jina.JinaSingleDataRequestRPC/process_single_data', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaSingleDocumentRequestRPCStub(object): @@ -158,10 +182,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.stream_doc = channel.unary_stream( - '/jina.JinaSingleDocumentRequestRPC/stream_doc', - request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - ) + '/jina.JinaSingleDocumentRequestRPC/stream_doc', + request_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + response_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + ) class JinaSingleDocumentRequestRPCServicer(object): @@ -171,8 +195,7 @@ class JinaSingleDocumentRequestRPCServicer(object): """ def stream_doc(self, request, context): - """Used for streaming one document to the Executors - """ + """Used for streaming one document to the Executors""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -180,18 +203,19 @@ def stream_doc(self, request, context): def add_JinaSingleDocumentRequestRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'stream_doc': grpc.unary_stream_rpc_method_handler( - servicer.stream_doc, - request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, - response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, - ), + 'stream_doc': grpc.unary_stream_rpc_method_handler( + servicer.stream_doc, + request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString, + response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers) + 'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaSingleDocumentRequestRPC(object): """* jina gRPC service for DataRequests. @@ -199,21 +223,33 @@ class JinaSingleDocumentRequestRPC(object): """ @staticmethod - def stream_doc(request, + def stream_doc( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream(request, target, '/jina.JinaSingleDocumentRequestRPC/stream_doc', + '/jina.JinaSingleDocumentRequestRPC/stream_doc', jina__pb2.SingleDocumentRequestProto.SerializeToString, jina__pb2.SingleDocumentRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaRPCStub(object): @@ -228,10 +264,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.Call = channel.stream_stream( - '/jina.JinaRPC/Call', - request_serializer=jina__pb2.DataRequestProto.SerializeToString, - response_deserializer=jina__pb2.DataRequestProto.FromString, - ) + '/jina.JinaRPC/Call', + request_serializer=jina__pb2.DataRequestProto.SerializeToString, + response_deserializer=jina__pb2.DataRequestProto.FromString, + ) class JinaRPCServicer(object): @@ -240,8 +276,7 @@ class JinaRPCServicer(object): """ def Call(self, request_iterator, context): - """Pass in a Request and a filled Request with matches will be returned. - """ + """Pass in a Request and a filled Request with matches will be returned.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') @@ -249,39 +284,52 @@ def Call(self, request_iterator, context): def add_JinaRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'Call': grpc.stream_stream_rpc_method_handler( - servicer.Call, - request_deserializer=jina__pb2.DataRequestProto.FromString, - response_serializer=jina__pb2.DataRequestProto.SerializeToString, - ), + 'Call': grpc.stream_stream_rpc_method_handler( + servicer.Call, + request_deserializer=jina__pb2.DataRequestProto.FromString, + response_serializer=jina__pb2.DataRequestProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaRPC', rpc_method_handlers) + 'jina.JinaRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaRPC(object): """* jina streaming gRPC service. """ @staticmethod - def Call(request_iterator, + def Call( + request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.stream_stream( + request_iterator, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/jina.JinaRPC/Call', + '/jina.JinaRPC/Call', jina__pb2.DataRequestProto.SerializeToString, jina__pb2.DataRequestProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaDiscoverEndpointsRPCStub(object): @@ -296,10 +344,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.endpoint_discovery = channel.unary_unary( - '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.EndpointsProto.FromString, - ) + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.EndpointsProto.FromString, + ) class JinaDiscoverEndpointsRPCServicer(object): @@ -316,39 +364,52 @@ def endpoint_discovery(self, request, context): def add_JinaDiscoverEndpointsRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( - servicer.endpoint_discovery, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.EndpointsProto.SerializeToString, - ), + 'endpoint_discovery': grpc.unary_unary_rpc_method_handler( + servicer.endpoint_discovery, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.EndpointsProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers) + 'jina.JinaDiscoverEndpointsRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaDiscoverEndpointsRPC(object): """* jina gRPC service to expose Endpoints from Executors. """ @staticmethod - def endpoint_discovery(request, + def endpoint_discovery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', + '/jina.JinaDiscoverEndpointsRPC/endpoint_discovery', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.EndpointsProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaGatewayDryRunRPCStub(object): @@ -363,10 +424,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.dry_run = channel.unary_unary( - '/jina.JinaGatewayDryRunRPC/dry_run', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.StatusProto.FromString, - ) + '/jina.JinaGatewayDryRunRPC/dry_run', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.StatusProto.FromString, + ) class JinaGatewayDryRunRPCServicer(object): @@ -383,39 +444,52 @@ def dry_run(self, request, context): def add_JinaGatewayDryRunRPCServicer_to_server(servicer, server): rpc_method_handlers = { - 'dry_run': grpc.unary_unary_rpc_method_handler( - servicer.dry_run, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.StatusProto.SerializeToString, - ), + 'dry_run': grpc.unary_unary_rpc_method_handler( + servicer.dry_run, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.StatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaGatewayDryRunRPC', rpc_method_handlers) + 'jina.JinaGatewayDryRunRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaGatewayDryRunRPC(object): """* jina gRPC service to expose Endpoints from Executors. """ @staticmethod - def dry_run(request, + def dry_run( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaGatewayDryRunRPC/dry_run', + '/jina.JinaGatewayDryRunRPC/dry_run', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.StatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaInfoRPCStub(object): @@ -430,10 +504,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self._status = channel.unary_unary( - '/jina.JinaInfoRPC/_status', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.JinaInfoProto.FromString, - ) + '/jina.JinaInfoRPC/_status', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.JinaInfoProto.FromString, + ) class JinaInfoRPCServicer(object): @@ -450,39 +524,52 @@ def _status(self, request, context): def add_JinaInfoRPCServicer_to_server(servicer, server): rpc_method_handlers = { - '_status': grpc.unary_unary_rpc_method_handler( - servicer._status, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.JinaInfoProto.SerializeToString, - ), + '_status': grpc.unary_unary_rpc_method_handler( + servicer._status, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.JinaInfoProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaInfoRPC', rpc_method_handlers) + 'jina.JinaInfoRPC', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaInfoRPC(object): """* jina gRPC service to expose information about running jina version and environment. """ @staticmethod - def _status(request, + def _status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaInfoRPC/_status', + '/jina.JinaInfoRPC/_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.JinaInfoProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotStub(object): @@ -497,10 +584,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot = channel.unary_unary( - '/jina.JinaExecutorSnapshot/snapshot', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshot/snapshot', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotServicer(object): @@ -517,39 +604,52 @@ def snapshot(self, request, context): def add_JinaExecutorSnapshotServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot': grpc.unary_unary_rpc_method_handler( - servicer.snapshot, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot': grpc.unary_unary_rpc_method_handler( + servicer.snapshot, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshot', rpc_method_handlers) + 'jina.JinaExecutorSnapshot', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshot(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot(request, + def snapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshot/snapshot', + '/jina.JinaExecutorSnapshot/snapshot', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorSnapshotProgressStub(object): @@ -564,10 +664,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.snapshot_status = channel.unary_unary( - '/jina.JinaExecutorSnapshotProgress/snapshot_status', - request_serializer=jina__pb2.SnapshotId.SerializeToString, - response_deserializer=jina__pb2.SnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorSnapshotProgress/snapshot_status', + request_serializer=jina__pb2.SnapshotId.SerializeToString, + response_deserializer=jina__pb2.SnapshotStatusProto.FromString, + ) class JinaExecutorSnapshotProgressServicer(object): @@ -584,39 +684,52 @@ def snapshot_status(self, request, context): def add_JinaExecutorSnapshotProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'snapshot_status': grpc.unary_unary_rpc_method_handler( - servicer.snapshot_status, - request_deserializer=jina__pb2.SnapshotId.FromString, - response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, - ), + 'snapshot_status': grpc.unary_unary_rpc_method_handler( + servicer.snapshot_status, + request_deserializer=jina__pb2.SnapshotId.FromString, + response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers) + 'jina.JinaExecutorSnapshotProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorSnapshotProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def snapshot_status(request, + def snapshot_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorSnapshotProgress/snapshot_status', + '/jina.JinaExecutorSnapshotProgress/snapshot_status', jina__pb2.SnapshotId.SerializeToString, jina__pb2.SnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreStub(object): @@ -631,10 +744,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore = channel.unary_unary( - '/jina.JinaExecutorRestore/restore', - request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestore/restore', + request_serializer=jina__pb2.RestoreSnapshotCommand.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreServicer(object): @@ -651,39 +764,52 @@ def restore(self, request, context): def add_JinaExecutorRestoreServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore': grpc.unary_unary_rpc_method_handler( - servicer.restore, - request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore': grpc.unary_unary_rpc_method_handler( + servicer.restore, + request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestore', rpc_method_handlers) + 'jina.JinaExecutorRestore', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestore(object): """* jina gRPC service to trigger a restore at the Executor Runtime. """ @staticmethod - def restore(request, + def restore( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestore/restore', + '/jina.JinaExecutorRestore/restore', jina__pb2.RestoreSnapshotCommand.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) class JinaExecutorRestoreProgressStub(object): @@ -698,10 +824,10 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.restore_status = channel.unary_unary( - '/jina.JinaExecutorRestoreProgress/restore_status', - request_serializer=jina__pb2.RestoreId.SerializeToString, - response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, - ) + '/jina.JinaExecutorRestoreProgress/restore_status', + request_serializer=jina__pb2.RestoreId.SerializeToString, + response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString, + ) class JinaExecutorRestoreProgressServicer(object): @@ -718,36 +844,49 @@ def restore_status(self, request, context): def add_JinaExecutorRestoreProgressServicer_to_server(servicer, server): rpc_method_handlers = { - 'restore_status': grpc.unary_unary_rpc_method_handler( - servicer.restore_status, - request_deserializer=jina__pb2.RestoreId.FromString, - response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, - ), + 'restore_status': grpc.unary_unary_rpc_method_handler( + servicer.restore_status, + request_deserializer=jina__pb2.RestoreId.FromString, + response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'jina.JinaExecutorRestoreProgress', rpc_method_handlers) + 'jina.JinaExecutorRestoreProgress', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class JinaExecutorRestoreProgress(object): """* jina gRPC service to trigger a snapshot at the Executor Runtime. """ @staticmethod - def restore_status(request, + def restore_status( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/jina.JinaExecutorRestoreProgress/restore_status', + '/jina.JinaExecutorRestoreProgress/restore_status', jina__pb2.RestoreId.SerializeToString, jina__pb2.RestoreSnapshotStatusProto.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/proto/serializer.py b/jina/proto/serializer.py index 853b8c4e7af54..e600ebbd0f141 100644 --- a/jina/proto/serializer.py +++ b/jina/proto/serializer.py @@ -308,6 +308,7 @@ def FromString(x: bytes): # noqa: DAR201 """ import os + if x: os.environ['JINA_GRPC_RECV_BYTES'] = str( len(x) + int(os.environ.get('JINA_GRPC_RECV_BYTES', 0)) @@ -315,4 +316,3 @@ def FromString(x: bytes): return SingleDocumentRequest(x) else: return SingleDocumentRequest() - diff --git a/jina/resources/base-gateway/gateway.py b/jina/resources/base-gateway/gateway.py index db80bdd57651e..bed2706479d2f 100644 --- a/jina/resources/base-gateway/gateway.py +++ b/jina/resources/base-gateway/gateway.py @@ -1,4 +1,5 @@ from jina.serve.runtimes.gateway.gateway import BaseGateway + class PlaceHolderGateway(BaseGateway): - pass \ No newline at end of file + pass diff --git a/jina/resources/project-template/deployment/client.py b/jina/resources/project-template/deployment/client.py index 4e00143740ece..23899eccfb5d8 100644 --- 
a/jina/resources/project-template/deployment/client.py +++ b/jina/resources/project-template/deployment/client.py @@ -4,5 +4,7 @@ if __name__ == '__main__': c = Client(host='grpc://0.0.0.0:54321') - da = c.post('/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc]) + da = c.post( + '/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc] + ) print(da.text) diff --git a/jina/resources/project-template/flow/client.py b/jina/resources/project-template/flow/client.py index 4e00143740ece..23899eccfb5d8 100644 --- a/jina/resources/project-template/flow/client.py +++ b/jina/resources/project-template/flow/client.py @@ -4,5 +4,7 @@ if __name__ == '__main__': c = Client(host='grpc://0.0.0.0:54321') - da = c.post('/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc]) + da = c.post( + '/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc] + ) print(da.text) diff --git a/jina/resources/project-template/flow/executor1/executor.py b/jina/resources/project-template/flow/executor1/executor.py index 228898398908c..a8c9855c007f6 100644 --- a/jina/resources/project-template/flow/executor1/executor.py +++ b/jina/resources/project-template/flow/executor1/executor.py @@ -8,4 +8,4 @@ class MyExecutor(Executor): def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]: docs[0].text = 'hello, world!' docs[1].text = 'goodbye, world!' - return docs \ No newline at end of file + return docs diff --git a/jina/serve/consensus/add_voter/pb/add_voter_pb2.py b/jina/serve/consensus/add_voter/pb/add_voter_pb2.py index ee5528f3fe13e..7ed5de5c3a30e 100644 --- a/jina/serve/consensus/add_voter/pb/add_voter_pb2.py +++ b/jina/serve/consensus/add_voter/pb/add_voter_pb2.py @@ -6,28 +6,29 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'add_voter_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: 
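A note on the `_USE_C_DESCRIPTORS` branches that black re-indents throughout these pb2 modules: they only execute under the pure-Python descriptor implementation, and the `_serialized_start`/`_serialized_end` pairs are byte offsets of each message and service descriptor inside the serialized file descriptor added via `AddSerializedFile`. To see which implementation a given environment uses — via a protobuf-internal module whose path may change between releases:

from google.protobuf.internal import api_implementation

# typically 'upb', 'cpp' or 'python' depending on how protobuf was built
print(api_implementation.Type())
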
- DESCRIPTOR._options = None - _AWAITRESPONSE._serialized_start=19 - _AWAITRESPONSE._serialized_end=64 - _FORGETRESPONSE._serialized_start=66 - _FORGETRESPONSE._serialized_end=82 - _FUTURE._serialized_start=84 - _FUTURE._serialized_end=117 - _ADDVOTERREQUEST._serialized_start=119 - _ADDVOTERREQUEST._serialized_end=189 - _RAFTADMIN._serialized_start=191 - _RAFTADMIN._serialized_end=317 + DESCRIPTOR._options = None + _AWAITRESPONSE._serialized_start = 19 + _AWAITRESPONSE._serialized_end = 64 + _FORGETRESPONSE._serialized_start = 66 + _FORGETRESPONSE._serialized_end = 82 + _FUTURE._serialized_start = 84 + _FUTURE._serialized_end = 117 + _ADDVOTERREQUEST._serialized_start = 119 + _ADDVOTERREQUEST._serialized_end = 189 + _RAFTADMIN._serialized_start = 191 + _RAFTADMIN._serialized_end = 317 # @@protoc_insertion_point(module_scope) diff --git a/jina/serve/consensus/add_voter/pb/add_voter_pb2_grpc.py b/jina/serve/consensus/add_voter/pb/add_voter_pb2_grpc.py index e59cf3319fefd..00f3f8ef25197 100644 --- a/jina/serve/consensus/add_voter/pb/add_voter_pb2_grpc.py +++ b/jina/serve/consensus/add_voter/pb/add_voter_pb2_grpc.py @@ -15,20 +15,20 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.AddVoter = channel.unary_unary( - '/RaftAdmin/AddVoter', - request_serializer=add__voter__pb2.AddVoterRequest.SerializeToString, - response_deserializer=add__voter__pb2.Future.FromString, - ) + '/RaftAdmin/AddVoter', + request_serializer=add__voter__pb2.AddVoterRequest.SerializeToString, + response_deserializer=add__voter__pb2.Future.FromString, + ) self.Await = channel.unary_unary( - '/RaftAdmin/Await', - request_serializer=add__voter__pb2.Future.SerializeToString, - response_deserializer=add__voter__pb2.AwaitResponse.FromString, - ) + '/RaftAdmin/Await', + request_serializer=add__voter__pb2.Future.SerializeToString, + response_deserializer=add__voter__pb2.AwaitResponse.FromString, + ) self.Forget = channel.unary_unary( - '/RaftAdmin/Forget', - request_serializer=add__voter__pb2.Future.SerializeToString, - response_deserializer=add__voter__pb2.ForgetResponse.FromString, - ) + '/RaftAdmin/Forget', + request_serializer=add__voter__pb2.Future.SerializeToString, + response_deserializer=add__voter__pb2.ForgetResponse.FromString, + ) class RaftAdminServicer(object): @@ -55,78 +55,115 @@ def Forget(self, request, context): def add_RaftAdminServicer_to_server(servicer, server): rpc_method_handlers = { - 'AddVoter': grpc.unary_unary_rpc_method_handler( - servicer.AddVoter, - request_deserializer=add__voter__pb2.AddVoterRequest.FromString, - response_serializer=add__voter__pb2.Future.SerializeToString, - ), - 'Await': grpc.unary_unary_rpc_method_handler( - servicer.Await, - request_deserializer=add__voter__pb2.Future.FromString, - response_serializer=add__voter__pb2.AwaitResponse.SerializeToString, - ), - 'Forget': grpc.unary_unary_rpc_method_handler( - servicer.Forget, - request_deserializer=add__voter__pb2.Future.FromString, - response_serializer=add__voter__pb2.ForgetResponse.SerializeToString, - ), + 'AddVoter': grpc.unary_unary_rpc_method_handler( + servicer.AddVoter, + request_deserializer=add__voter__pb2.AddVoterRequest.FromString, + response_serializer=add__voter__pb2.Future.SerializeToString, + ), + 'Await': grpc.unary_unary_rpc_method_handler( + servicer.Await, + request_deserializer=add__voter__pb2.Future.FromString, + response_serializer=add__voter__pb2.AwaitResponse.SerializeToString, + ), + 'Forget': grpc.unary_unary_rpc_method_handler( + servicer.Forget, + 
request_deserializer=add__voter__pb2.Future.FromString, + response_serializer=add__voter__pb2.ForgetResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'RaftAdmin', rpc_method_handlers) + 'RaftAdmin', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class RaftAdmin(object): """Missing associated documentation comment in .proto file.""" @staticmethod - def AddVoter(request, + def AddVoter( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/AddVoter', + '/RaftAdmin/AddVoter', add__voter__pb2.AddVoterRequest.SerializeToString, add__voter__pb2.Future.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) @staticmethod - def Await(request, + def Await( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/Await', + '/RaftAdmin/Await', add__voter__pb2.Future.SerializeToString, add__voter__pb2.AwaitResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) @staticmethod - def Forget(request, + def Forget( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/Forget', + '/RaftAdmin/Forget', add__voter__pb2.Future.SerializeToString, add__voter__pb2.ForgetResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/serve/consensus/add_voter/pb2/add_voter_pb2.py b/jina/serve/consensus/add_voter/pb2/add_voter_pb2.py index 7ec7594f2afa7..77927fab05ce9 100644 --- a/jina/serve/consensus/add_voter/pb2/add_voter_pb2.py +++ b/jina/serve/consensus/add_voter/pb2/add_voter_pb2.py @@ -7,61 +7,77 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from 
google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3') - +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3' +) _AWAITRESPONSE = DESCRIPTOR.message_types_by_name['AwaitResponse'] _FORGETRESPONSE = DESCRIPTOR.message_types_by_name['ForgetResponse'] _FUTURE = DESCRIPTOR.message_types_by_name['Future'] _ADDVOTERREQUEST = DESCRIPTOR.message_types_by_name['AddVoterRequest'] -AwaitResponse = _reflection.GeneratedProtocolMessageType('AwaitResponse', (_message.Message,), { - 'DESCRIPTOR' : _AWAITRESPONSE, - '__module__' : 'add_voter_pb2' - # @@protoc_insertion_point(class_scope:AwaitResponse) - }) +AwaitResponse = _reflection.GeneratedProtocolMessageType( + 'AwaitResponse', + (_message.Message,), + { + 'DESCRIPTOR': _AWAITRESPONSE, + '__module__': 'add_voter_pb2', + # @@protoc_insertion_point(class_scope:AwaitResponse) + }, +) _sym_db.RegisterMessage(AwaitResponse) -ForgetResponse = _reflection.GeneratedProtocolMessageType('ForgetResponse', (_message.Message,), { - 'DESCRIPTOR' : _FORGETRESPONSE, - '__module__' : 'add_voter_pb2' - # @@protoc_insertion_point(class_scope:ForgetResponse) - }) +ForgetResponse = _reflection.GeneratedProtocolMessageType( + 'ForgetResponse', + (_message.Message,), + { + 'DESCRIPTOR': _FORGETRESPONSE, + '__module__': 'add_voter_pb2', + # @@protoc_insertion_point(class_scope:ForgetResponse) + }, +) _sym_db.RegisterMessage(ForgetResponse) -Future = _reflection.GeneratedProtocolMessageType('Future', (_message.Message,), { - 'DESCRIPTOR' : _FUTURE, - '__module__' : 'add_voter_pb2' - # @@protoc_insertion_point(class_scope:Future) - }) +Future = _reflection.GeneratedProtocolMessageType( + 'Future', + (_message.Message,), + { + 'DESCRIPTOR': _FUTURE, + '__module__': 'add_voter_pb2', + # @@protoc_insertion_point(class_scope:Future) + }, +) _sym_db.RegisterMessage(Future) -AddVoterRequest = _reflection.GeneratedProtocolMessageType('AddVoterRequest', (_message.Message,), { - 'DESCRIPTOR' : _ADDVOTERREQUEST, - '__module__' : 'add_voter_pb2' - # @@protoc_insertion_point(class_scope:AddVoterRequest) - }) +AddVoterRequest = _reflection.GeneratedProtocolMessageType( + 'AddVoterRequest', + (_message.Message,), + { + 'DESCRIPTOR': _ADDVOTERREQUEST, + 
'__module__': 'add_voter_pb2', + # @@protoc_insertion_point(class_scope:AddVoterRequest) + }, +) _sym_db.RegisterMessage(AddVoterRequest) _RAFTADMIN = DESCRIPTOR.services_by_name['RaftAdmin'] if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _AWAITRESPONSE._serialized_start=19 - _AWAITRESPONSE._serialized_end=64 - _FORGETRESPONSE._serialized_start=66 - _FORGETRESPONSE._serialized_end=82 - _FUTURE._serialized_start=84 - _FUTURE._serialized_end=117 - _ADDVOTERREQUEST._serialized_start=119 - _ADDVOTERREQUEST._serialized_end=189 - _RAFTADMIN._serialized_start=191 - _RAFTADMIN._serialized_end=317 + DESCRIPTOR._options = None + _AWAITRESPONSE._serialized_start = 19 + _AWAITRESPONSE._serialized_end = 64 + _FORGETRESPONSE._serialized_start = 66 + _FORGETRESPONSE._serialized_end = 82 + _FUTURE._serialized_start = 84 + _FUTURE._serialized_end = 117 + _ADDVOTERREQUEST._serialized_start = 119 + _ADDVOTERREQUEST._serialized_end = 189 + _RAFTADMIN._serialized_start = 191 + _RAFTADMIN._serialized_end = 317 # @@protoc_insertion_point(module_scope) diff --git a/jina/serve/consensus/add_voter/pb2/add_voter_pb2_grpc.py b/jina/serve/consensus/add_voter/pb2/add_voter_pb2_grpc.py index e59cf3319fefd..00f3f8ef25197 100644 --- a/jina/serve/consensus/add_voter/pb2/add_voter_pb2_grpc.py +++ b/jina/serve/consensus/add_voter/pb2/add_voter_pb2_grpc.py @@ -15,20 +15,20 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.AddVoter = channel.unary_unary( - '/RaftAdmin/AddVoter', - request_serializer=add__voter__pb2.AddVoterRequest.SerializeToString, - response_deserializer=add__voter__pb2.Future.FromString, - ) + '/RaftAdmin/AddVoter', + request_serializer=add__voter__pb2.AddVoterRequest.SerializeToString, + response_deserializer=add__voter__pb2.Future.FromString, + ) self.Await = channel.unary_unary( - '/RaftAdmin/Await', - request_serializer=add__voter__pb2.Future.SerializeToString, - response_deserializer=add__voter__pb2.AwaitResponse.FromString, - ) + '/RaftAdmin/Await', + request_serializer=add__voter__pb2.Future.SerializeToString, + response_deserializer=add__voter__pb2.AwaitResponse.FromString, + ) self.Forget = channel.unary_unary( - '/RaftAdmin/Forget', - request_serializer=add__voter__pb2.Future.SerializeToString, - response_deserializer=add__voter__pb2.ForgetResponse.FromString, - ) + '/RaftAdmin/Forget', + request_serializer=add__voter__pb2.Future.SerializeToString, + response_deserializer=add__voter__pb2.ForgetResponse.FromString, + ) class RaftAdminServicer(object): @@ -55,78 +55,115 @@ def Forget(self, request, context): def add_RaftAdminServicer_to_server(servicer, server): rpc_method_handlers = { - 'AddVoter': grpc.unary_unary_rpc_method_handler( - servicer.AddVoter, - request_deserializer=add__voter__pb2.AddVoterRequest.FromString, - response_serializer=add__voter__pb2.Future.SerializeToString, - ), - 'Await': grpc.unary_unary_rpc_method_handler( - servicer.Await, - request_deserializer=add__voter__pb2.Future.FromString, - response_serializer=add__voter__pb2.AwaitResponse.SerializeToString, - ), - 'Forget': grpc.unary_unary_rpc_method_handler( - servicer.Forget, - request_deserializer=add__voter__pb2.Future.FromString, - response_serializer=add__voter__pb2.ForgetResponse.SerializeToString, - ), + 'AddVoter': grpc.unary_unary_rpc_method_handler( + servicer.AddVoter, + request_deserializer=add__voter__pb2.AddVoterRequest.FromString, + response_serializer=add__voter__pb2.Future.SerializeToString, + ), + 'Await': grpc.unary_unary_rpc_method_handler( + servicer.Await, + 
request_deserializer=add__voter__pb2.Future.FromString, + response_serializer=add__voter__pb2.AwaitResponse.SerializeToString, + ), + 'Forget': grpc.unary_unary_rpc_method_handler( + servicer.Forget, + request_deserializer=add__voter__pb2.Future.FromString, + response_serializer=add__voter__pb2.ForgetResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'RaftAdmin', rpc_method_handlers) + 'RaftAdmin', rpc_method_handlers + ) server.add_generic_rpc_handlers((generic_handler,)) - # This class is part of an EXPERIMENTAL API. +# This class is part of an EXPERIMENTAL API. class RaftAdmin(object): """Missing associated documentation comment in .proto file.""" @staticmethod - def AddVoter(request, + def AddVoter( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/AddVoter', + '/RaftAdmin/AddVoter', add__voter__pb2.AddVoterRequest.SerializeToString, add__voter__pb2.Future.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) @staticmethod - def Await(request, + def Await( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/Await', + '/RaftAdmin/Await', add__voter__pb2.Future.SerializeToString, add__voter__pb2.AwaitResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) @staticmethod - def Forget(request, + def Forget( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/RaftAdmin/Forget', + '/RaftAdmin/Forget', add__voter__pb2.Future.SerializeToString, add__voter__pb2.ForgetResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/jina/serve/executors/__init__.py b/jina/serve/executors/__init__.py index a14d64fe22bb6..c5c8f72a8e6c1 100644 --- a/jina/serve/executors/__init__.py +++ b/jina/serve/executors/__init__.py @@ -477,9 
+477,11 @@ def _get_endpoint_models_dict(self): 'is_generator': _is_generator, 'is_singleton_doc': _is_singleton_doc, 'parameters': { - 'name': _parameters_model.__name__ - if _parameters_model is not None - else None, + 'name': ( + _parameters_model.__name__ + if _parameters_model is not None + else None + ), 'model': _parameters_model, }, } diff --git a/jina/serve/executors/decorators.py b/jina/serve/executors/decorators.py index 8317b28794761..4034439102478 100644 --- a/jina/serve/executors/decorators.py +++ b/jina/serve/executors/decorators.py @@ -1,4 +1,5 @@ """Decorators and wrappers designed for wrapping :class:`BaseExecutor` functions. """ + import functools import inspect import os @@ -368,7 +369,7 @@ def _inject_owner_attrs( parameters_is_pydantic_model=fn_with_schema.parameters_is_pydantic_model, parameters_model=fn_with_schema.parameters_model, request_schema=request_schema_arg, - response_schema=response_schema_arg + response_schema=response_schema_arg, ) fn_with_schema.validate() diff --git a/jina/serve/executors/metas.py b/jina/serve/executors/metas.py index d477cee8d5588..7dc265d84a139 100644 --- a/jina/serve/executors/metas.py +++ b/jina/serve/executors/metas.py @@ -24,7 +24,15 @@ def get_executor_taboo(): Returns a set of executor meta variables :return: set of executor meta variables """ - taboo = {'self', 'args', 'kwargs', 'metas', 'requests', 'runtime_args', 'dynamic_batching'} + taboo = { + 'self', + 'args', + 'kwargs', + 'metas', + 'requests', + 'runtime_args', + 'dynamic_batching', + } _defaults = get_default_metas() taboo.update(_defaults.keys()) return taboo diff --git a/jina/serve/executors/run.py b/jina/serve/executors/run.py index 6901982e6548b..64bf2ef693c81 100644 --- a/jina/serve/executors/run.py +++ b/jina/serve/executors/run.py @@ -14,8 +14,8 @@ def run_raft( - args: 'argparse.Namespace', - is_ready: Union['multiprocessing.Event', 'threading.Event'], + args: 'argparse.Namespace', + is_ready: Union['multiprocessing.Event', 'threading.Event'], ): """Method to run the RAFT @@ -72,15 +72,15 @@ def pascal_case_dict(d): def run( - args: 'argparse.Namespace', - name: str, - runtime_cls: Type[AsyncNewLoopRuntime], - envs: Dict[str, str], - is_started: Union['multiprocessing.Event', 'threading.Event'], - is_shutdown: Union['multiprocessing.Event', 'threading.Event'], - is_ready: Union['multiprocessing.Event', 'threading.Event'], - is_signal_handlers_installed: Union['multiprocessing.Event', 'threading.Event'], - jaml_classes: Optional[Dict] = None, + args: 'argparse.Namespace', + name: str, + runtime_cls: Type[AsyncNewLoopRuntime], + envs: Dict[str, str], + is_started: Union['multiprocessing.Event', 'threading.Event'], + is_shutdown: Union['multiprocessing.Event', 'threading.Event'], + is_ready: Union['multiprocessing.Event', 'threading.Event'], + is_signal_handlers_installed: Union['multiprocessing.Event', 'threading.Event'], + jaml_classes: Optional[Dict] = None, ): """Method representing the :class:`BaseRuntime` activity. 
@@ -116,12 +116,15 @@ def run( req_handler_cls = None if runtime_cls == 'GatewayRuntime': from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler + req_handler_cls = GatewayRequestHandler elif runtime_cls == 'WorkerRuntime': from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler + req_handler_cls = WorkerRequestHandler elif runtime_cls == 'HeadRuntime': from jina.serve.runtimes.head.request_handling import HeaderRequestHandler + req_handler_cls = HeaderRequestHandler logger = JinaLogger(name, **vars(args)) @@ -142,14 +145,16 @@ def _set_envs(): args=args, req_handler_cls=req_handler_cls, gateway_load_balancer=getattr(args, 'gateway_load_balancer', False), - signal_handlers_installed_event=is_signal_handlers_installed + signal_handlers_installed_event=is_signal_handlers_installed, ) except Exception as ex: logger.error( - f'{ex!r} during {runtime_cls!r} initialization' - + f'\n add "--quiet-error" to suppress the exception details' - if not args.quiet_error - else '', + ( + f'{ex!r} during {runtime_cls!r} initialization' + + f'\n add "--quiet-error" to suppress the exception details' + if not args.quiet_error + else '' + ), exc_info=not args.quiet_error, ) else: @@ -165,10 +170,12 @@ def _set_envs(): logger.debug('process terminated') -def run_stateful(args: 'argparse.Namespace', - name: str, - runtime_cls: Type[AsyncNewLoopRuntime], - envs: Dict[str, str]): +def run_stateful( + args: 'argparse.Namespace', + name: str, + runtime_cls: Type[AsyncNewLoopRuntime], + envs: Dict[str, str], +): """ Method to be called in Docker containers when Stateful Executor is required. This will start 2 processes in the Docker container. @@ -179,6 +186,7 @@ def run_stateful(args: 'argparse.Namespace', """ import signal from jina.jaml import JAML + is_ready = multiprocessing.Event() is_shutdown = multiprocessing.Event() is_started = multiprocessing.Event() diff --git a/jina/serve/networking/__init__.py b/jina/serve/networking/__init__.py index d575c1cf419bc..649b0f3be4055 100644 --- a/jina/serve/networking/__init__.py +++ b/jina/serve/networking/__init__.py @@ -1,5 +1,15 @@ import asyncio -from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, Union, AsyncGenerator +from typing import ( + TYPE_CHECKING, + Dict, + List, + Optional, + Sequence, + Set, + Tuple, + Union, + AsyncGenerator, +) import grpc from grpc.aio import AioRpcError @@ -48,15 +58,15 @@ class GrpcConnectionPool: K8S_PORT_MONITORING = 9090 def __init__( - self, - runtime_name, - logger: Optional[JinaLogger] = None, - compression: Optional[str] = None, - metrics_registry: Optional['CollectorRegistry'] = None, - meter: Optional['Meter'] = None, - aio_tracing_client_interceptors: Optional[Sequence['ClientInterceptor']] = None, - tracing_client_interceptor: Optional['OpenTelemetryClientInterceptor'] = None, - channel_options: Optional[list] = None, + self, + runtime_name, + logger: Optional[JinaLogger] = None, + compression: Optional[str] = None, + metrics_registry: Optional['CollectorRegistry'] = None, + meter: Optional['Meter'] = None, + aio_tracing_client_interceptors: Optional[Sequence['ClientInterceptor']] = None, + tracing_client_interceptor: Optional['OpenTelemetryClientInterceptor'] = None, + channel_options: Optional[list] = None, ): self._logger = logger or JinaLogger(self.__class__.__name__) self.channel_options = channel_options @@ -69,8 +79,8 @@ def __init__( if metrics_registry: with ImportExtensions( - required=True, - help_text='You need to install the 
`prometheus_client` to use the monitoring functionality of jina', + required=True, + help_text='You need to install the `prometheus_client` to use the monitoring functionality of jina', ): from prometheus_client import Summary @@ -144,16 +154,16 @@ def __init__( self._deployment_address_map = {} def send_requests( - self, - requests: List[Request], - deployment: str, - head: bool = False, - shard_id: Optional[int] = None, - polling_type: PollingType = PollingType.ANY, - endpoint: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + requests: List[Request], + deployment: str, + head: bool = False, + shard_id: Optional[int] = None, + polling_type: PollingType = PollingType.ANY, + endpoint: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ) -> List[asyncio.Task]: """Send a request to target via one or all of the pooled connections, depending on polling_type @@ -195,12 +205,12 @@ def send_requests( return results def send_discover_endpoint( - self, - deployment: str, - head: bool = True, - shard_id: Optional[int] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + deployment: str, + head: bool = True, + shard_id: Optional[int] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ): """Sends a discover Endpoint call to target. @@ -225,15 +235,15 @@ def send_discover_endpoint( return None def send_requests_once( - self, - requests: List[Request], - deployment: str, - metadata: Optional[Dict[str, str]] = None, - head: bool = False, - shard_id: Optional[int] = None, - endpoint: Optional[str] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + requests: List[Request], + deployment: str, + metadata: Optional[Dict[str, str]] = None, + head: bool = False, + shard_id: Optional[int] = None, + endpoint: Optional[str] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ) -> Optional[asyncio.Task]: """Send a request to target via only one of the pooled connections @@ -265,14 +275,14 @@ def send_requests_once( return None def send_single_document_request( - self, - request: SingleDocumentRequest, - deployment: str, - metadata: Optional[Dict[str, str]] = None, - head: bool = False, - endpoint: Optional[str] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + request: SingleDocumentRequest, + deployment: str, + metadata: Optional[Dict[str, str]] = None, + head: bool = False, + endpoint: Optional[str] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ) -> Optional[AsyncGenerator]: """Send a request to target via only one of the pooled connections @@ -297,17 +307,15 @@ def send_single_document_request( ) return result_async_generator else: - self._logger.debug( - f'no available connections for deployment {deployment}' - ) + self._logger.debug(f'no available connections for deployment {deployment}') return None def add_connection( - self, - deployment: str, - address: str, - head: bool = False, - shard_id: Optional[int] = None, + self, + deployment: str, + address: str, + head: bool = False, + shard_id: Optional[int] = None, ): """ Adds a connection for a deployment to this connection pool @@ -326,11 +334,11 @@ def add_connection( self._deployment_address_map[deployment] = address async def remove_connection( - self, - deployment: str, - address: str, - head: bool = False, - 
shard_id: Optional[int] = None, + self, + deployment: str, + address: str, + head: bool = False, + shard_id: Optional[int] = None, ): """ Removes a connection to a deployment @@ -355,18 +363,18 @@ async def close(self): await self._connections.close() async def _handle_aiorpcerror( - self, - error: AioRpcError, - retry_i: int = 0, - request_id: str = '', - tried_addresses: Set[str] = { - '' - }, # same deployment can have multiple addresses (replicas) - total_num_tries: int = 1, # number of retries + 1 - current_address: str = '', # the specific address that was contacted during this attempt - current_deployment: str = '', # the specific deployment that was contacted during this attempt - connection_list: Optional[_ReplicaList] = None, - task_type: str = 'DataRequest' + self, + error: AioRpcError, + retry_i: int = 0, + request_id: str = '', + tried_addresses: Set[str] = { + '' + }, # same deployment can have multiple addresses (replicas) + total_num_tries: int = 1, # number of retries + 1 + current_address: str = '', # the specific address that was contacted during this attempt + current_deployment: str = '', # the specific deployment that was contacted during this attempt + connection_list: Optional[_ReplicaList] = None, + task_type: str = 'DataRequest', ) -> 'Optional[Union[AioRpcError, InternalNetworkError]]': # connection failures, cancelled requests, and timed out requests should be retried # all other cases should not be retried and will be raised immediately @@ -377,9 +385,14 @@ async def _handle_aiorpcerror( # requests usually gets cancelled when the server shuts down # retries for cancelled requests will hit another replica in K8s skip_resetting = False - if error.code() == grpc.StatusCode.UNAVAILABLE and 'not the leader' in error.details(): - self._logger.debug(f'RAFT node of {current_deployment} is not the leader. Trying next replica, if available.') - skip_resetting = True # no need to reset, no problem with channel + if ( + error.code() == grpc.StatusCode.UNAVAILABLE + and 'not the leader' in error.details() + ): + self._logger.debug( + f'RAFT node of {current_deployment} is not the leader. Trying next replica, if available.' + ) + skip_resetting = True # no need to reset, no problem with channel else: self._logger.debug( f'gRPC call to {current_deployment} for {task_type} errored, with error {format_grpc_error(error)} and for the {retry_i + 1}th time.' 
@@ -424,13 +437,13 @@ async def _handle_aiorpcerror( return None def _send_single_doc_request( - self, - request: SingleDocumentRequest, - connections: _ReplicaList, - endpoint: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + request: SingleDocumentRequest, + connections: _ReplicaList, + endpoint: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ) -> 'asyncio.Task[Union[Tuple, AioRpcError, InternalNetworkError]]': # this wraps the awaitable object from grpc as a coroutine so it can be used as a task # the grpc call function is not a coroutine but some _AioCall @@ -447,16 +460,16 @@ async def async_generator_wrapper(): num_replicas = len(connections.get_all_connections()) if retries is None or retries < 0: total_num_tries = ( - max(DEFAULT_MINIMUM_RETRIES, len(connections.get_all_connections())) - + 1 + max(DEFAULT_MINIMUM_RETRIES, len(connections.get_all_connections())) + + 1 ) else: total_num_tries = 1 + retries # try once, then do all the retries for i in range(total_num_tries): current_connection = None while ( - current_connection is None - or current_connection.address in tried_addresses + current_connection is None + or current_connection.address in tried_addresses ): current_connection = await connections.get_next_connection( num_retries=total_num_tries @@ -467,11 +480,14 @@ async def async_generator_wrapper(): break tried_addresses.add(current_connection.address) try: - async for resp, metadata_resp in current_connection.send_single_doc_request( - request=request, - metadata=metadata, - compression=self.compression, - timeout=timeout, + async for ( + resp, + metadata_resp, + ) in current_connection.send_single_doc_request( + request=request, + metadata=metadata, + compression=self.compression, + timeout=timeout, ): yield resp, metadata_resp return @@ -485,7 +501,7 @@ async def async_generator_wrapper(): current_address=current_connection.address, current_deployment=current_connection.deployment_name, connection_list=connections, - task_type='SingleDocumentRequest' + task_type='SingleDocumentRequest', ) if error: yield error, None @@ -497,13 +513,13 @@ async def async_generator_wrapper(): return async_generator_wrapper() def _send_requests( - self, - requests: List[Request], - connections: _ReplicaList, - endpoint: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + requests: List[Request], + connections: _ReplicaList, + endpoint: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ) -> 'asyncio.Task[Union[Tuple, AioRpcError, InternalNetworkError]]': # this wraps the awaitable object from grpc as a coroutine so it can be used as a task # the grpc call function is not a coroutine but some _AioCall @@ -520,16 +536,16 @@ async def task_wrapper(): num_replicas = len(connections.get_all_connections()) if retries is None or retries < 0: total_num_tries = ( - max(DEFAULT_MINIMUM_RETRIES, len(connections.get_all_connections())) - + 1 + max(DEFAULT_MINIMUM_RETRIES, len(connections.get_all_connections())) + + 1 ) else: total_num_tries = 1 + retries # try once, then do all the retries for i in range(total_num_tries): current_connection = None while ( - current_connection is None - or current_connection.address in tried_addresses + current_connection 
is None + or current_connection.address in tried_addresses ): current_connection = await connections.get_next_connection( num_retries=total_num_tries @@ -556,7 +572,7 @@ async def task_wrapper(): current_address=current_connection.address, current_deployment=current_connection.deployment_name, connection_list=connections, - task_type='DataRequest' + task_type='DataRequest', ) if error: return error @@ -566,10 +582,10 @@ async def task_wrapper(): return asyncio.create_task(task_wrapper()) def _send_discover_endpoint( - self, - connection_list: _ReplicaList, - timeout: Optional[float] = None, - retries: Optional[int] = -1, + self, + connection_list: _ReplicaList, + timeout: Optional[float] = None, + retries: Optional[int] = -1, ): # this wraps the awaitable object from grpc as a coroutine so it can be used as a task # the grpc call function is not a coroutine but some _AioCall @@ -577,11 +593,11 @@ async def task_coroutine(): tried_addresses = set() if retries is None or retries < 0: total_num_tries = ( - max( - DEFAULT_MINIMUM_RETRIES, - len(connection_list.get_all_connections()), - ) - + 1 + max( + DEFAULT_MINIMUM_RETRIES, + len(connection_list.get_all_connections()), + ) + + 1 ) else: total_num_tries = 1 + retries # try once, then do all the retries @@ -603,7 +619,7 @@ async def task_coroutine(): current_deployment=connection.deployment_name, connection_list=connection_list, total_num_tries=total_num_tries, - task_type='EndpointDiscovery' + task_type='EndpointDiscovery', ) if error: raise error diff --git a/jina/serve/networking/connection_stub.py b/jina/serve/networking/connection_stub.py index f2ffd564fde27..95de5538f8c77 100644 --- a/jina/serve/networking/connection_stub.py +++ b/jina/serve/networking/connection_stub.py @@ -122,11 +122,13 @@ def _record_received_bytes_metric(self, nbytes: int): nbytes, self.stub_specific_labels ) - async def send_single_doc_request(self, - request: SingleDocumentRequest, - metadata, - compression, - timeout: Optional[float] = None): + async def send_single_doc_request( + self, + request: SingleDocumentRequest, + metadata, + compression, + timeout: Optional[float] = None, + ): """ Send requests and uses the appropriate grpc stub for this Stub is chosen based on availability and type of requests @@ -147,10 +149,10 @@ async def send_single_doc_request(self, with timer: async for response in self.stream_doc_stub.stream_doc( - request, - compression=compression, - timeout=timeout, - metadata=metadata, + request, + compression=compression, + timeout=timeout, + metadata=metadata, ): self._record_received_bytes_metric(response.nbytes) yield response, None diff --git a/jina/serve/networking/replica_list.py b/jina/serve/networking/replica_list.py index e23a07aaaae96..9804a9050cad1 100644 --- a/jina/serve/networking/replica_list.py +++ b/jina/serve/networking/replica_list.py @@ -23,15 +23,15 @@ class _ReplicaList: """ def __init__( - self, - metrics: _NetworkingMetrics, - histograms: _NetworkingHistograms, - logger, - runtime_name: str, - aio_tracing_client_interceptors: Optional[Sequence['ClientInterceptor']] = None, - tracing_client_interceptor: Optional['OpenTelemetryClientInterceptor'] = None, - deployment_name: str = '', - channel_options: Optional[Union[list, Dict[str, Any]]] = None, + self, + metrics: _NetworkingMetrics, + histograms: _NetworkingHistograms, + logger, + runtime_name: str, + aio_tracing_client_interceptors: Optional[Sequence['ClientInterceptor']] = None, + tracing_client_interceptor: Optional['OpenTelemetryClientInterceptor'] = None, + 
deployment_name: str = '', + channel_options: Optional[Union[list, Dict[str, Any]]] = None, ): self.runtime_name = runtime_name self._connections = [] @@ -59,8 +59,8 @@ async def reset_connection(self, address: str, deployment_name: str): parsed_address = urlparse(address) resolved_address = parsed_address.netloc if parsed_address.netloc else address if ( - resolved_address in self._address_to_connection_idx - and self._address_to_connection_idx[resolved_address] is not None + resolved_address in self._address_to_connection_idx + and self._address_to_connection_idx[resolved_address] is not None ): # remove connection: # in contrast to remove_connection(), we don't 'shorten' the data structures below, instead @@ -117,7 +117,9 @@ async def remove_connection(self, address: str): self._address_to_connection_idx[a] -= 1 def _create_connection(self, address, deployment_name: str): - self._logger.debug(f'create_connection connection for {deployment_name} to {address}') + self._logger.debug( + f'create_connection connection for {deployment_name} to {address}' + ) parsed_address = urlparse(address) address = parsed_address.netloc if parsed_address.netloc else address use_tls = parsed_address.scheme in TLS_PROTOCOL_SCHEMES diff --git a/jina/serve/runtimes/asyncio.py b/jina/serve/runtimes/asyncio.py index 8d2fc8beeb8bc..ca4504cbb7f29 100644 --- a/jina/serve/runtimes/asyncio.py +++ b/jina/serve/runtimes/asyncio.py @@ -337,10 +337,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.logger.debug(f'{self!r} is interrupted by user') elif exc_type and issubclass(exc_type, Exception): self.logger.error( - f'{exc_val!r} during {self.run_forever!r}' - + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{exc_val!r} during {self.run_forever!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) try: @@ -350,10 +352,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): pass except Exception as ex: self.logger.error( - f'{ex!r} during {self.teardown!r}' - + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r} during {self.teardown!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) diff --git a/jina/serve/runtimes/gateway/async_request_response_handling.py b/jina/serve/runtimes/gateway/async_request_response_handling.py index aa76202d45a0f..3a5fdca142138 100644 --- a/jina/serve/runtimes/gateway/async_request_response_handling.py +++ b/jina/serve/runtimes/gateway/async_request_response_handling.py @@ -201,9 +201,11 @@ async def _process_results_at_end_gateway( asyncio.ensure_future( _process_results_at_end_gateway(responding_tasks, request_graph) ), - asyncio.ensure_future(asyncio.gather(*floating_tasks)) - if len(floating_tasks) > 0 - else None, + ( + asyncio.ensure_future(asyncio.gather(*floating_tasks)) + if len(floating_tasks) > 0 + else None + ), ) return _handle_request diff --git a/jina/serve/runtimes/gateway/composite/__init__.py b/jina/serve/runtimes/gateway/composite/__init__.py index a105e724218c6..865a678ffb2a3 100644 --- a/jina/serve/runtimes/gateway/composite/__init__.py +++ b/jina/serve/runtimes/gateway/composite/__init__.py @@ -8,4 +8,5 @@ class CompositeGateway(CompositeServer, BaseGateway): """ :class:`CompositeGateway` is a CompositeServer that can be loaded from YAML as any 
other Gateway """ + pass diff --git a/jina/serve/runtimes/gateway/gateway.py b/jina/serve/runtimes/gateway/gateway.py index 04a5625613d45..ebb5ecb5424b6 100644 --- a/jina/serve/runtimes/gateway/gateway.py +++ b/jina/serve/runtimes/gateway/gateway.py @@ -52,5 +52,6 @@ class Gateway(BaseServer, BaseGateway): """ The class for where to inherit when you want to customize your Gateway. Important to provide backwards compatibility """ + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/jina/serve/runtimes/gateway/graph/topology_graph.py b/jina/serve/runtimes/gateway/graph/topology_graph.py index 40c05028873c8..89ad937c4698a 100644 --- a/jina/serve/runtimes/gateway/graph/topology_graph.py +++ b/jina/serve/runtimes/gateway/graph/topology_graph.py @@ -40,16 +40,16 @@ class TopologyGraph: class _ReqReplyNode: def __init__( - self, - name: str, - number_of_parts: int = 1, - floating: bool = False, - filter_condition: dict = None, - metadata: Optional[Dict] = None, - reduce: bool = True, - timeout_send: Optional[float] = None, - retries: Optional[int] = -1, - logger: Optional[JinaLogger] = None, + self, + name: str, + number_of_parts: int = 1, + floating: bool = False, + filter_condition: dict = None, + metadata: Optional[Dict] = None, + reduce: bool = True, + timeout_send: Optional[float] = None, + retries: Optional[int] = -1, + logger: Optional[JinaLogger] = None, ): self.name = name self.outgoing_nodes = [] @@ -77,17 +77,26 @@ def leaf(self): def _validate_against_outgoing_nodes(self): def _check_schema_equality(schema_1, schema_2): from collections import OrderedDict + # Naive check of compatibility - schema_1_properties = OrderedDict(sorted(schema_1.get('properties', {}).items())) - schema_2_properties = OrderedDict(sorted(schema_2.get('properties', {}).items())) + schema_1_properties = OrderedDict( + sorted(schema_1.get('properties', {}).items()) + ) + schema_2_properties = OrderedDict( + sorted(schema_2.get('properties', {}).items()) + ) if len(schema_1_properties) != len(schema_2_properties): return False - for property_1, property_2 in zip(schema_1_properties.keys(), schema_2_properties.keys()): + for property_1, property_2 in zip( + schema_1_properties.keys(), schema_2_properties.keys() + ): if property_1 != property_2: return False - if schema_1_properties[property_1].get('type', None) != schema_2_properties[property_2].get('type', None): + if schema_1_properties[property_1].get( + 'type', None + ) != schema_2_properties[property_2].get('type', None): return False # TODO: Add more complex check for nested definitions @@ -109,18 +118,20 @@ def _check_schema_equality(schema_1, schema_2): if incoming_endp in node._pydantic_models_by_endpoint: if endp in node._pydantic_models_by_endpoint: - if not _check_schema_equality(self._pydantic_models_by_endpoint[outgoing_endp][ - 'output' - ].schema(), - node._pydantic_models_by_endpoint[incoming_endp][ - 'input' - ].schema()): + if not _check_schema_equality( + self._pydantic_models_by_endpoint[outgoing_endp][ + 'output' + ].schema(), + node._pydantic_models_by_endpoint[incoming_endp][ + 'input' + ].schema(), + ): raise Exception( f'The output schema of {self.name} at {outgoing_endp} endpoint is incompatible with the input schema of {node.name} at {incoming_endp} endpoint' ) else: if ( - outgoing_endp != __default_endpoint__ + outgoing_endp != __default_endpoint__ ): # It could happen that there is an Encoder with default followed by an indexer with [index, search] raise Exception( f'{node.name} does not expose 
{incoming_endp} which makes it impossible to be chained with {self.name} on {outgoing_endp}' @@ -152,7 +163,7 @@ def _update_requests_with_filter_condition(self, need_copy): self.parts_to_send[i] = req def _update_request_by_params( - self, deployment_name: str, request_input_parameters: Dict + self, deployment_name: str, request_input_parameters: Dict ): specific_parameters = _parse_specific_params( request_input_parameters, deployment_name @@ -164,34 +175,34 @@ def _handle_internalnetworkerror(self, err): err_code = err.code() if err_code == grpc.StatusCode.UNAVAILABLE: err._details = ( - err.details() - + f' |Gateway: Communication error with deployment {self.name} at address(es) {err.dest_addr}. ' - f'Head or worker(s) may be down.' + err.details() + + f' |Gateway: Communication error with deployment {self.name} at address(es) {err.dest_addr}. ' + f'Head or worker(s) may be down.' ) raise err elif err_code == grpc.StatusCode.DEADLINE_EXCEEDED: err._details = ( - err.details() - + f'|Gateway: Connection with deployment {self.name} at address(es) {err.dest_addr} could be established, but timed out.' - f' You can increase the allowed time by setting `timeout_send` in your Flow YAML `with` block or Flow `__init__()` method.' + err.details() + + f'|Gateway: Connection with deployment {self.name} at address(es) {err.dest_addr} could be established, but timed out.' + f' You can increase the allowed time by setting `timeout_send` in your Flow YAML `with` block or Flow `__init__()` method.' ) raise err elif err_code == grpc.StatusCode.NOT_FOUND: err._details = ( - err.details() - + f'\n|Gateway: Connection error with deployment `{self.name}` at address(es) {err.dest_addr}.' - f' Connection with {err.dest_addr} succeeded, but `{self.name}` was not found.' - f' Possibly `{self.name}` is behind an API gateway but not reachable.' + err.details() + + f'\n|Gateway: Connection error with deployment `{self.name}` at address(es) {err.dest_addr}.' + f' Connection with {err.dest_addr} succeeded, but `{self.name}` was not found.' + f' Possibly `{self.name}` is behind an API gateway but not reachable.' ) raise err else: raise def get_endpoints( - self, - connection_pool: GrpcConnectionPool, - models_schema_list: List, - models_list: List, + self, + connection_pool: GrpcConnectionPool, + models_schema_list: List, + models_list: List, ) -> asyncio.Task: # models_schema_list and models_list is given to each node. 
And each one fills its models from google.protobuf import json_format @@ -219,9 +230,9 @@ async def task(): input_model = models_list[ models_schema_list.index(input_model_schema) ] - models_created_by_name[ - input_model_name - ] = input_model + models_created_by_name[input_model_name] = ( + input_model + ) else: if input_model_name not in models_created_by_name: if input_model_schema == legacy_doc_schema: @@ -234,9 +245,9 @@ async def task(): models_created_by_name, ) ) - models_created_by_name[ - input_model_name - ] = input_model + models_created_by_name[input_model_name] = ( + input_model + ) input_model = models_created_by_name[ input_model_name ] @@ -249,9 +260,9 @@ async def task(): output_model = models_list[ models_schema_list.index(output_model_schema) ] - models_created_by_name[ - output_model_name - ] = output_model + models_created_by_name[output_model_name] = ( + output_model + ) else: if output_model_name not in models_created_by_name: if output_model_name == legacy_doc_schema: @@ -264,9 +275,9 @@ async def task(): models_created_by_name, ) ) - models_created_by_name[ - output_model_name - ] = output_model + models_created_by_name[output_model_name] = ( + output_model + ) output_model = models_created_by_name[ output_model_name ] @@ -289,8 +300,8 @@ async def task(): ] = parameters_model else: if ( - parameters_model_name - not in models_created_by_name + parameters_model_name + not in models_created_by_name ): from pydantic import BaseModel @@ -328,11 +339,11 @@ async def task(): return asyncio.create_task(task()) async def stream_single_doc( - self, - request: SingleDocumentRequest, - connection_pool: GrpcConnectionPool, - endpoint: Optional[str], - return_type: Type[DocumentArray] = DocumentArray, + self, + request: SingleDocumentRequest, + connection_pool: GrpcConnectionPool, + endpoint: Optional[str], + return_type: Type[DocumentArray] = DocumentArray, ): if docarray_v2: if self.endpoints and endpoint in self.endpoints: @@ -341,13 +352,13 @@ async def stream_single_doc( ] async for resp, _ in connection_pool.send_single_document_request( - request=request, - deployment=self.name, - metadata=self._metadata, - head=True, - endpoint=endpoint, - timeout=self._timeout_send, - retries=self._retries, + request=request, + deployment=self.name, + metadata=self._metadata, + head=True, + endpoint=endpoint, + timeout=self._timeout_send, + retries=self._retries, ): if issubclass(type(resp), BaseException): raise resp @@ -359,9 +370,9 @@ async def stream_single_doc( # if return_type is not specified or if it is a default type, cast using retrieved # schemas if ( - not return_type - or not return_type.doc_type - or return_type.doc_type is AnyDoc + not return_type + or not return_type.doc_type + or return_type.doc_type is AnyDoc ): resp.document_cls = self._pydantic_models_by_endpoint[ endpoint @@ -371,16 +382,16 @@ async def stream_single_doc( yield resp async def _wait_previous_and_send( - self, - request: Optional[DataRequest], - previous_task: Optional[asyncio.Task], - connection_pool: GrpcConnectionPool, - endpoint: Optional[str], - target_executor_pattern: Optional[str] = None, - request_input_parameters: Dict = {}, - copy_request_at_send: bool = False, - init_task: Optional[asyncio.Task] = None, - return_type: Type[DocumentArray] = None, + self, + request: Optional[DataRequest], + previous_task: Optional[asyncio.Task], + connection_pool: GrpcConnectionPool, + endpoint: Optional[str], + target_executor_pattern: Optional[str] = None, + request_input_parameters: Dict = {}, + 
copy_request_at_send: bool = False, + init_task: Optional[asyncio.Task] = None, + return_type: Type[DocumentArray] = None, ): # Check my condition and send request with the condition metadata = {} @@ -418,8 +429,8 @@ async def _wait_previous_and_send( # avoid sending to executor which does not bind to this endpoint if endpoint is not None and self.endpoints is not None: if ( - endpoint not in self.endpoints - and __default_endpoint__ not in self.endpoints + endpoint not in self.endpoints + and __default_endpoint__ not in self.endpoints ): return request, metadata @@ -429,7 +440,7 @@ async def _wait_previous_and_send( ] if target_executor_pattern is not None and not re.match( - target_executor_pattern, self.name + target_executor_pattern, self.name ): return request, metadata # otherwise, send to executor and get response @@ -450,23 +461,23 @@ async def _wait_previous_and_send( if docarray_v2: if self.endpoints and ( - endpoint in self.endpoints - or __default_endpoint__ in self.endpoints + endpoint in self.endpoints + or __default_endpoint__ in self.endpoints ): from docarray.base_doc import AnyDoc # if return_type is not specified or if it is a default type, cast using retrieved # schemas if ( - not return_type - or not return_type.doc_type - or return_type.doc_type is AnyDoc + not return_type + or not return_type.doc_type + or return_type.doc_type is AnyDoc ): pydantic_models = ( - self._pydantic_models_by_endpoint.get(endpoint) - or self._pydantic_models_by_endpoint.get( - __default_endpoint__ - ) + self._pydantic_models_by_endpoint.get(endpoint) + or self._pydantic_models_by_endpoint.get( + __default_endpoint__ + ) ) resp.document_array_cls = DocList[ pydantic_models['output'] @@ -498,13 +509,13 @@ async def _wait_previous_and_send( return None, {} def _get_input_output_model_for_endpoint( - self, - previous_input, - previous_output, - previous_is_generator, - previous_is_singleton_doc, - previous_parameters, - endpoint, + self, + previous_input, + previous_output, + previous_is_generator, + previous_is_singleton_doc, + previous_parameters, + endpoint, ): if self._pydantic_models_by_endpoint is not None: @@ -523,11 +534,11 @@ def _get_input_output_model_for_endpoint( ] if ( - previous_output - and previous_output.schema() - == self._pydantic_models_by_endpoint[endpoint][ - "output" - ].schema() + previous_output + and previous_output.schema() + == self._pydantic_models_by_endpoint[endpoint][ + "output" + ].schema() ): # this is needed to not mix model IDs, otherwise FastAPI gets crazy return { @@ -566,13 +577,13 @@ def _get_input_output_model_for_endpoint( return None def _get_leaf_input_output_model( - self, - previous_input, - previous_output, - previous_is_generator, - previous_is_singleton_doc, - previous_parameters, - endpoint: Optional[str] = None, + self, + previous_input, + previous_output, + previous_is_generator, + previous_is_singleton_doc, + previous_parameters, + endpoint: Optional[str] = None, ): new_map = self._get_input_output_model_for_endpoint( previous_input, @@ -591,15 +602,15 @@ def _get_leaf_input_output_model( list_of_maps = outgoing_node._get_leaf_input_output_model( previous_input=new_map['input'] if new_map is not None else None, previous_output=new_map['output'] if new_map is not None else None, - previous_is_generator=new_map['is_generator'] - if new_map is not None - else None, - previous_is_singleton_doc=new_map['is_singleton_doc'] - if new_map is not None - else None, - previous_parameters=new_map['parameters'] - if new_map is not None - else None, + 
previous_is_generator=( + new_map['is_generator'] if new_map is not None else None + ), + previous_is_singleton_doc=( + new_map['is_singleton_doc'] if new_map is not None else None + ), + previous_parameters=( + new_map['parameters'] if new_map is not None else None + ), endpoint=endpoint, ) # We are interested in the last one, that will be the task that awaits all the previous @@ -608,17 +619,17 @@ def _get_leaf_input_output_model( return list_of_outputs def get_leaf_req_response_tasks( - self, - connection_pool: GrpcConnectionPool, - request_to_send: Optional[DataRequest], - previous_task: Optional[asyncio.Task], - endpoint: Optional[str] = None, - target_executor_pattern: Optional[str] = None, - request_input_parameters: Dict = {}, - request_input_has_specific_params: bool = False, - copy_request_at_send: bool = False, - init_task: Optional[asyncio.Task] = None, - return_type: Type[DocumentArray] = DocumentArray, + self, + connection_pool: GrpcConnectionPool, + request_to_send: Optional[DataRequest], + previous_task: Optional[asyncio.Task], + endpoint: Optional[str] = None, + target_executor_pattern: Optional[str] = None, + request_input_parameters: Dict = {}, + request_input_has_specific_params: bool = False, + copy_request_at_send: bool = False, + init_task: Optional[asyncio.Task] = None, + return_type: Type[DocumentArray] = DocumentArray, ) -> List[Tuple[bool, asyncio.Task]]: """ Gets all the tasks corresponding from all the subgraphs born from this node @@ -685,7 +696,7 @@ def get_leaf_req_response_tasks( request_input_parameters=request_input_parameters, request_input_has_specific_params=request_input_has_specific_params, copy_request_at_send=num_outgoing_nodes > 1 - and request_input_has_specific_params, + and request_input_has_specific_params, return_type=return_type, ) # We are interested in the last one, that will be the task that awaits all the previous @@ -721,7 +732,6 @@ def _find_route(request): return request class _EndGatewayNode(_ReqReplyNode): - """ Dummy node to be added before the gateway. This is to solve a problem we had when implementing `floating Executors`. 
If we do not add this at the end, this structure does not work: @@ -739,18 +749,18 @@ async def task_wrapper(): return asyncio.create_task(task_wrapper()) def get_leaf_req_response_tasks( - self, previous_task: Optional[asyncio.Task], *args, **kwargs + self, previous_task: Optional[asyncio.Task], *args, **kwargs ) -> List[Tuple[bool, asyncio.Task]]: return [(True, previous_task)] def _get_leaf_input_output_model( - self, - previous_input, - previous_output, - previous_is_generator, - previous_is_singleton_doc, - previous_parameters, - endpoint: Optional[str] = None, + self, + previous_input, + previous_output, + previous_is_generator, + previous_is_singleton_doc, + previous_parameters, + endpoint: Optional[str] = None, ): return [ { @@ -763,16 +773,16 @@ def _get_leaf_input_output_model( ] def __init__( - self, - graph_representation: Dict, - graph_conditions: Dict = {}, - deployments_metadata: Dict = {}, - deployments_no_reduce: List[str] = [], - timeout_send: Optional[float] = 1.0, - retries: Optional[int] = -1, - logger: Optional[JinaLogger] = None, - *args, - **kwargs, + self, + graph_representation: Dict, + graph_conditions: Dict = {}, + deployments_metadata: Dict = {}, + deployments_no_reduce: List[str] = [], + timeout_send: Optional[float] = 1.0, + retries: Optional[int] = -1, + logger: Optional[JinaLogger] = None, + *args, + **kwargs, ): self.logger = logger or JinaLogger(self.__class__.__name__) num_parts_per_node = defaultdict(int) @@ -798,9 +808,11 @@ def __init__( metadata = deployments_metadata.get(node_name, None) nodes[node_name] = self._ReqReplyNode( name=node_name, - number_of_parts=num_parts_per_node[node_name] - if num_parts_per_node[node_name] > 0 - else 1, + number_of_parts=( + num_parts_per_node[node_name] + if num_parts_per_node[node_name] > 0 + else 1 + ), floating=node_name in floating_deployment_set, filter_condition=condition, metadata=metadata, @@ -825,7 +837,7 @@ def __init__( self._all_endpoints = None async def _get_all_endpoints( - self, connection_pool, retry_forever=False, is_cancel=None + self, connection_pool, retry_forever=False, is_cancel=None ): def _condition(): if is_cancel is not None: diff --git a/jina/serve/runtimes/gateway/grpc/__init__.py b/jina/serve/runtimes/gateway/grpc/__init__.py index c771bf6503ff2..590f57a71d408 100644 --- a/jina/serve/runtimes/gateway/grpc/__init__.py +++ b/jina/serve/runtimes/gateway/grpc/__init__.py @@ -8,5 +8,5 @@ class GRPCGateway(GRPCServer, BaseGateway): """ :class:`GRPCGateway` is a GRPCServer that can be loaded from YAML as any other Gateway """ - pass + pass diff --git a/jina/serve/runtimes/gateway/http/__init__.py b/jina/serve/runtimes/gateway/http/__init__.py index 93f4be83e703c..520350528e51b 100644 --- a/jina/serve/runtimes/gateway/http/__init__.py +++ b/jina/serve/runtimes/gateway/http/__init__.py @@ -1,4 +1,6 @@ -from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway # keep import here for backwards compatibility +from jina.serve.runtimes.gateway.http.fastapi import ( + FastAPIBaseGateway, +) # keep import here for backwards compatibility from jina.serve.runtimes.gateway.gateway import BaseGateway from jina.serve.runtimes.servers.http import HTTPServer @@ -9,4 +11,5 @@ class HTTPGateway(HTTPServer, BaseGateway): """ :class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app """ + pass diff --git a/jina/serve/runtimes/gateway/http/fastapi/__init__.py b/jina/serve/runtimes/gateway/http/fastapi/__init__.py index 98f5f552417ba..1d771aacbc507 100644 --- 
a/jina/serve/runtimes/gateway/http/fastapi/__init__.py +++ b/jina/serve/runtimes/gateway/http/fastapi/__init__.py @@ -8,4 +8,5 @@ class FastAPIBaseGateway(FastAPIBaseServer, BaseGateway): """ :class:`FastAPIBaseGateway` is a FastAPIBaseServer that can be loaded from YAML as any other Gateway """ + pass diff --git a/jina/serve/runtimes/gateway/http_fastapi_app.py b/jina/serve/runtimes/gateway/http_fastapi_app.py index 7275c2a6b18e2..fe48e3d5aa985 100644 --- a/jina/serve/runtimes/gateway/http_fastapi_app.py +++ b/jina/serve/runtimes/gateway/http_fastapi_app.py @@ -15,18 +15,18 @@ def get_fastapi_app( - streamer: 'GatewayStreamer', - title: str, - description: str, - no_debug_endpoints: bool, - no_crud_endpoints: bool, - expose_endpoints: Optional[str], - expose_graphql_endpoint: bool, - cors: bool, - logger: 'JinaLogger', - tracing: Optional[bool] = None, - tracer_provider: Optional['trace.TracerProvider'] = None, - **kwargs + streamer: 'GatewayStreamer', + title: str, + description: str, + no_debug_endpoints: bool, + no_crud_endpoints: bool, + expose_endpoints: Optional[str], + expose_graphql_endpoint: bool, + cors: bool, + logger: 'JinaLogger', + tracing: Optional[bool] = None, + tracer_provider: Optional['trace.TracerProvider'] = None, + **kwargs, ): """ Get the app from FastAPI as the REST interface. @@ -60,8 +60,8 @@ def get_fastapi_app( app = FastAPI( title=title or 'My Jina Service', description=description - or 'This is my awesome service. You can set `title` and `description` in your `Flow` or `Gateway` ' - 'to customize the title and description.', + or 'This is my awesome service. You can set `title` and `description` in your `Flow` or `Gateway` ' + 'to customize the title and description.', version=__version__, ) @@ -90,23 +90,21 @@ async def _shutdown(): { 'name': 'Debug', 'description': 'Debugging interface. In production, you should hide them by setting ' - '`--no-debug-endpoints` in `Flow`/`Gateway`.', + '`--no-debug-endpoints` in `Flow`/`Gateway`.', } ) from jina._docarray import DocumentArray from jina.proto import jina_pb2 from jina.serve.executors import __dry_run_endpoint__ - from jina.serve.runtimes.gateway.models import ( - PROTO_TO_PYDANTIC_MODELS - ) + from jina.serve.runtimes.gateway.models import PROTO_TO_PYDANTIC_MODELS from jina.serve.runtimes.gateway.health_model import JinaInfoModel from jina.types.request.status import StatusMessage @app.get( path='/dry_run', summary='Get the readiness of Jina Flow service, sends an empty DocumentArray to the complete Flow to ' - 'validate connectivity', + 'validate connectivity', response_model=PROTO_TO_PYDANTIC_MODELS.StatusProto, ) async def _flow_health(): @@ -161,7 +159,7 @@ async def _status(): # do not add response_model here, this debug endpoint should not restrict the response model ) async def post( - body: JinaEndpointRequestModel, response: Response + body: JinaEndpointRequestModel, response: Response ): # 'response' is a FastAPI response, not a Jina response """ Post a data request to some endpoint. 
@@ -195,8 +193,8 @@ async def post( import grpc if ( - err.code() == grpc.StatusCode.UNAVAILABLE - or err.code() == grpc.StatusCode.NOT_FOUND + err.code() == grpc.StatusCode.UNAVAILABLE + or err.code() == grpc.StatusCode.NOT_FOUND ): response.status_code = status.HTTP_503_SERVICE_UNAVAILABLE elif err.code() == grpc.StatusCode.DEADLINE_EXCEEDED: @@ -249,11 +247,15 @@ def expose_executor_endpoint(exec_endpoint, http_path=None, **kwargs): kwargs['methods'] = kwargs.get('methods', ['POST']) if kwargs['methods'] == ['POST']: + @app.api_route( - path=http_path or exec_endpoint, name=http_path or exec_endpoint, **kwargs + path=http_path or exec_endpoint, + name=http_path or exec_endpoint, + **kwargs, ) async def foo_post(body: JinaRequestModel, response: Response): from jina.enums import DataInputType + bd = body.dict() if body else {'data': None} bd['exec_endpoint'] = exec_endpoint req_generator_input = bd @@ -269,8 +271,8 @@ async def foo_post(body: JinaRequestModel, response: Response): import grpc if ( - err.code() == grpc.StatusCode.UNAVAILABLE - or err.code() == grpc.StatusCode.NOT_FOUND + err.code() == grpc.StatusCode.UNAVAILABLE + or err.code() == grpc.StatusCode.NOT_FOUND ): response.status_code = status.HTTP_503_SERVICE_UNAVAILABLE elif err.code() == grpc.StatusCode.DEADLINE_EXCEEDED: @@ -285,12 +287,17 @@ async def foo_post(body: JinaRequestModel, response: Response): f'Error while getting responses from deployments: {err.details()}' ) return result + else: + @app.api_route( - path=http_path or exec_endpoint, name=http_path or exec_endpoint, **kwargs + path=http_path or exec_endpoint, + name=http_path or exec_endpoint, + **kwargs, ) async def foo_no_post(body: JinaRequestModel): from jina.enums import DataInputType + bd = body.dict() if body else {'data': None} bd['exec_endpoint'] = exec_endpoint req_generator_input = bd @@ -307,7 +314,7 @@ async def foo_no_post(body: JinaRequestModel): { 'name': 'CRUD', 'description': 'CRUD interface. If your service does not implement those interfaces, you should ' - 'hide them by setting `--no-crud-endpoints` in `Flow`/`Gateway`.', + 'hide them by setting `--no-crud-endpoints` in `Flow`/`Gateway`.', } ) crud = { @@ -318,9 +325,9 @@ async def foo_no_post(body: JinaRequestModel): } for k, v in crud.items(): v['tags'] = ['CRUD'] - v[ - 'description' - ] = f'Post data requests to the Flow. Executors with `@requests(on="{k}")` will respond.' + v['description'] = ( + f'Post data requests to the Flow. Executors with `@requests(on="{k}")` will respond.' 
+ ) expose_executor_endpoint(exec_endpoint=k, **v) if openapi_tags: @@ -350,7 +357,7 @@ async def foo_no_post(body: JinaRequestModel): raise NotImplementedError('GraphQL is not yet supported for DocArrayV2') async def get_docs_from_endpoint( - data, target_executor, parameters, exec_endpoint + data, target_executor, parameters, exec_endpoint ): req_generator_input = { 'data': [asdict(d) for d in data], @@ -361,8 +368,8 @@ async def get_docs_from_endpoint( } if ( - req_generator_input['data'] is not None - and 'docs' in req_generator_input['data'] + req_generator_input['data'] is not None + and 'docs' in req_generator_input['data'] ): req_generator_input['data'] = req_generator_input['data']['docs'] try: @@ -380,11 +387,11 @@ async def get_docs_from_endpoint( class Mutation: @strawberry.mutation async def docs( - self, - data: Optional[List[StrawberryDocumentInput]] = None, - target_executor: Optional[str] = None, - parameters: Optional[JSONScalar] = None, - exec_endpoint: str = '/search', + self, + data: Optional[List[StrawberryDocumentInput]] = None, + target_executor: Optional[str] = None, + parameters: Optional[JSONScalar] = None, + exec_endpoint: str = '/search', ) -> List[StrawberryDocument]: return await get_docs_from_endpoint( data, target_executor, parameters, exec_endpoint @@ -394,11 +401,11 @@ async def docs( class Query: @strawberry.field async def docs( - self, - data: Optional[List[StrawberryDocumentInput]] = None, - target_executor: Optional[str] = None, - parameters: Optional[JSONScalar] = None, - exec_endpoint: str = '/search', + self, + data: Optional[List[StrawberryDocumentInput]] = None, + target_executor: Optional[str] = None, + parameters: Optional[JSONScalar] = None, + exec_endpoint: str = '/search', ) -> List[StrawberryDocument]: return await get_docs_from_endpoint( data, target_executor, parameters, exec_endpoint diff --git a/jina/serve/runtimes/gateway/load_balancer/__init__.py b/jina/serve/runtimes/gateway/load_balancer/__init__.py index 64f5e9d8392e9..a86d6300b2691 100644 --- a/jina/serve/runtimes/gateway/load_balancer/__init__.py +++ b/jina/serve/runtimes/gateway/load_balancer/__init__.py @@ -8,4 +8,5 @@ class LoadBalancerGateway(LoadBalancingServer, BaseGateway): """ :class:`LoadBalancerGateway` """ + pass diff --git a/jina/serve/runtimes/gateway/models.py b/jina/serve/runtimes/gateway/models.py index 6e09e33c12ad9..164c1113c7e00 100644 --- a/jina/serve/runtimes/gateway/models.py +++ b/jina/serve/runtimes/gateway/models.py @@ -177,9 +177,11 @@ def protobuf_to_pydantic_model( all_fields[field_name] = ( field_type, - Field(default_factory=default_factory) - if default_factory - else Field(default=default_value), + ( + Field(default_factory=default_factory) + if default_factory + else Field(default=default_value) + ), ) # Post-processing (Handle oneof fields) @@ -227,6 +229,7 @@ def _to_camel_case(snake_str: str) -> str: # with the 'title' method and join them together. return components[0] + ''.join(x.title() for x in components[1:]) + if not docarray_v2: from docarray.document.pydantic_model import PydanticDocument, PydanticDocumentArray @@ -265,7 +268,6 @@ class Config: alias_generator = _to_camel_case allow_population_by_field_name = True - class JinaResponseModel(BaseModel): """ Jina HTTP Response model. Only `request_id` and `data` are preserved. 
@@ -280,7 +282,6 @@ class Config: alias_generator = _to_camel_case allow_population_by_field_name = True - class JinaEndpointRequestModel(JinaRequestModel): """ Jina HTTP request model that allows customized endpoint. diff --git a/jina/serve/runtimes/gateway/request_handling.py b/jina/serve/runtimes/gateway/request_handling.py index ad294e5f5ef01..4da74fec2271e 100644 --- a/jina/serve/runtimes/gateway/request_handling.py +++ b/jina/serve/runtimes/gateway/request_handling.py @@ -70,9 +70,11 @@ def __init__( meter=meter, aio_tracing_client_interceptors=aio_tracing_client_interceptors, tracing_client_interceptor=tracing_client_interceptor, - grpc_channel_options=self.runtime_args.grpc_channel_options - if hasattr(self.runtime_args, 'grpc_channel_options') - else None, + grpc_channel_options=( + self.runtime_args.grpc_channel_options + if hasattr(self.runtime_args, 'grpc_channel_options') + else None + ), ) GatewayStreamer._set_env_streamer_args( diff --git a/jina/serve/runtimes/gateway/websocket/__init__.py b/jina/serve/runtimes/gateway/websocket/__init__.py index c112cadc40ad7..a56db97cb867e 100644 --- a/jina/serve/runtimes/gateway/websocket/__init__.py +++ b/jina/serve/runtimes/gateway/websocket/__init__.py @@ -8,4 +8,5 @@ class WebSocketGateway(WebSocketServer, BaseGateway): """ :class:`WebSocketGateway` is a WebSocketServer that can be loaded from YAML as any other Gateway """ - pass \ No newline at end of file + + pass diff --git a/jina/serve/runtimes/gateway/websocket_fastapi_app.py b/jina/serve/runtimes/gateway/websocket_fastapi_app.py index 700c07268f878..0d5b3a71d1191 100644 --- a/jina/serve/runtimes/gateway/websocket_fastapi_app.py +++ b/jina/serve/runtimes/gateway/websocket_fastapi_app.py @@ -24,10 +24,10 @@ def _fits_ws_close_msg(msg: str): def get_fastapi_app( - streamer: 'GatewayStreamer', - logger: 'JinaLogger', - tracing: Optional[bool] = None, - tracer_provider: Optional['trace.TracerProvider'] = None, + streamer: 'GatewayStreamer', + logger: 'JinaLogger', + tracing: Optional[bool] = None, + tracer_provider: Optional['trace.TracerProvider'] = None, ): """ Get the app from FastAPI as the Websocket interface. 
@@ -105,7 +105,7 @@ async def iter(self, websocket: WebSocket) -> AsyncIterator[Any]: pass async def send( - self, websocket: WebSocket, data: Union[DataRequest, StatusMessage] + self, websocket: WebSocket, data: Union[DataRequest, StatusMessage] ) -> None: subprotocol = self.protocol_dict[self.get_client(websocket)] if subprotocol == WebsocketSubProtocols.JSON: @@ -159,7 +159,7 @@ async def _shutdown(): @app.websocket('/') async def websocket_endpoint( - websocket: WebSocket, response: Response + websocket: WebSocket, response: Response ): # 'response' is a FastAPI response, not a Jina response await manager.connect(websocket) @@ -172,16 +172,25 @@ async def req_iter(): # NOTE: Helps in converting camelCase to snake_case # you can't do `yield from` inside an async function if not docarray_v2: - req_generator_input = JinaEndpointRequestModel(**request).dict() + req_generator_input = JinaEndpointRequestModel( + **request + ).dict() req_generator_input['data_type'] = DataInputType.DICT - if request['data'] is not None and 'docs' in request['data']: - req_generator_input['data'] = req_generator_input['data'][ - 'docs' - ] - for data_request in request_generator(**req_generator_input): + if ( + request['data'] is not None + and 'docs' in request['data'] + ): + req_generator_input['data'] = req_generator_input[ + 'data' + ]['docs'] + for data_request in request_generator( + **req_generator_input + ): yield data_request else: - raise RuntimeError(f' DocArray v2 is not compatible with {WebsocketSubProtocols.JSON} subprotocol') + raise RuntimeError( + f' DocArray v2 is not compatible with {WebsocketSubProtocols.JSON} subprotocol' + ) elif isinstance(request, bytes): if request == bytes(True): break @@ -238,7 +247,7 @@ async def _get_singleton_result(request_iterator) -> Dict: @app.get( path='/dry_run', summary='Get the readiness of Jina Flow service, sends an empty DocumentArray to the complete Flow to ' - 'validate connectivity', + 'validate connectivity', response_model=PROTO_TO_PYDANTIC_MODELS.StatusProto, ) async def _dry_run_http(): @@ -270,7 +279,7 @@ async def _dry_run_http(): @app.websocket('/dry_run') async def websocket_endpoint( - websocket: WebSocket, response: Response + websocket: WebSocket, response: Response ): # 'response' is a FastAPI response, not a Jina response from jina.proto import jina_pb2 from jina.serve.executors import __dry_run_endpoint__ @@ -283,11 +292,11 @@ async def websocket_endpoint( try: async for _ in streamer.rpc_stream( - request_iterator=request_generator( - exec_endpoint=__dry_run_endpoint__, - data=da, - data_type=DataInputType.DOCUMENT, - ) + request_iterator=request_generator( + exec_endpoint=__dry_run_endpoint__, + data=da, + data_type=DataInputType.DOCUMENT, + ) ): pass status_message = StatusMessage() diff --git a/jina/serve/runtimes/head/request_handling.py b/jina/serve/runtimes/head/request_handling.py index b8d5cf76dce9d..6891c68c02d6a 100644 --- a/jina/serve/runtimes/head/request_handling.py +++ b/jina/serve/runtimes/head/request_handling.py @@ -17,6 +17,7 @@ from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler from jina.types.request.data import DataRequest, Response from jina._docarray import docarray_v2 + if docarray_v2: from jina.serve.runtimes.helper import _create_pydantic_model_from_schema from docarray import DocList @@ -42,15 +43,15 @@ class HeaderRequestHandler(MonitoringRequestMixin): DEFAULT_POLLING = PollingType.ANY def __init__( - self, - args: 'argparse.Namespace', - logger: 'JinaLogger', - metrics_registry: 
Optional['CollectorRegistry'] = None, - meter=None, - runtime_name: Optional[str] = None, - aio_tracing_client_interceptors=None, - tracing_client_interceptor=None, - **kwargs, + self, + args: 'argparse.Namespace', + logger: 'JinaLogger', + metrics_registry: Optional['CollectorRegistry'] = None, + meter=None, + runtime_name: Optional[str] = None, + aio_tracing_client_interceptors=None, + tracing_client_interceptor=None, + **kwargs, ): if args.name is None: args.name = '' @@ -147,7 +148,7 @@ def __init__( connection_pool=self.connection_pool, name=self._deployment_name, retries=self._retries, - stop_event=self.endpoints_discovery_stop_event + stop_event=self.endpoints_discovery_stop_event, ) ) @@ -158,13 +159,13 @@ def _default_polling_dict(self, default_polling): ) async def _gather_worker_tasks( - self, - requests, - connection_pool, - deployment_name, - polling_type, - timeout_send, - retries, + self, + requests, + connection_pool, + deployment_name, + polling_type, + timeout_send, + retries, ): worker_send_tasks = connection_pool.send_requests( requests=requests, @@ -193,11 +194,11 @@ async def _gather_worker_tasks( @staticmethod def _merge_metadata( - metadata, - uses_after_metadata, - uses_before_metadata, - total_shards, - failed_shards, + metadata, + uses_after_metadata, + uses_before_metadata, + total_shards, + failed_shards, ): merged_metadata = {} if uses_before_metadata: @@ -215,17 +216,17 @@ def _merge_metadata( return merged_metadata async def _handle_data_request( - self, - requests, - connection_pool, - uses_before_address, - uses_after_address, - timeout_send, - retries, - reduce, - polling_type, - deployment_name, - endpoint + self, + requests, + connection_pool, + uses_before_address, + uses_after_address, + timeout_send, + retries, + reduce, + polling_type, + deployment_name, + endpoint, ) -> Tuple['DataRequest', Dict]: for req in requests: if docarray_v2: @@ -325,13 +326,18 @@ async def _handle_data_request( self._update_end_request_metrics(response_request) return response_request, merged_metadata - def _get_endpoints_from_workers(self, connection_pool: GrpcConnectionPool, name: str, retries: int, - stop_event): + def _get_endpoints_from_workers( + self, connection_pool: GrpcConnectionPool, name: str, retries: int, stop_event + ): from google.protobuf import json_format from docarray.documents.legacy import LegacyDocument + legacy_doc_schema = LegacyDocument.schema() + async def task(): - self.logger.debug(f'starting get endpoints from workers task for deployment {name}') + self.logger.debug( + f'starting get endpoints from workers task for deployment {name}' + ) while not stop_event.is_set(): try: endpoints = await connection_pool.send_discover_endpoint( @@ -349,27 +355,37 @@ async def task(): output_model_schema = inner_dict['output']['model'] if input_model_schema == legacy_doc_schema: - models_created_by_name[input_model_name] = LegacyDocument + models_created_by_name[input_model_name] = ( + LegacyDocument + ) elif input_model_name not in models_created_by_name: - input_model = _create_pydantic_model_from_schema(input_model_schema, input_model_name, {}) + input_model = _create_pydantic_model_from_schema( + input_model_schema, input_model_name, {} + ) models_created_by_name[input_model_name] = input_model if output_model_name == legacy_doc_schema: - models_created_by_name[output_model_name] = LegacyDocument + models_created_by_name[output_model_name] = ( + LegacyDocument + ) elif output_model_name not in models_created_by_name: - output_model = 
_create_pydantic_model_from_schema(output_model_schema, output_model_name, {}) + output_model = _create_pydantic_model_from_schema( + output_model_schema, output_model_name, {} + ) models_created_by_name[output_model_name] = output_model self._pydantic_models_by_endpoint[endpoint] = { 'input': models_created_by_name[input_model_name], - 'output': models_created_by_name[output_model_name] + 'output': models_created_by_name[output_model_name], } stop_event.set() return else: await asyncio.sleep(0.1) except Exception as exc: - self.logger.debug(f'Exception raised from sending discover endpoint {exc}') + self.logger.debug( + f'Exception raised from sending discover endpoint {exc}' + ) await asyncio.sleep(0.1) return task() @@ -385,7 +401,9 @@ def cancel_endpoint_discovery_from_workers_task(self): self.endpoints_discovery_stop_event.set() # this event is useless if simply cancel self.endpoints_discovery_task.cancel() except Exception as ex: - self.logger.debug(f'exception during endpoint discovery task cancellation: {ex}') + self.logger.debug( + f'exception during endpoint discovery task cancellation: {ex}' + ) pass async def close(self): @@ -446,22 +464,27 @@ async def process_data(self, requests: List[DataRequest], context) -> DataReques timeout_send=self.timeout_send, polling_type=self._polling[endpoint], deployment_name=self._deployment_name, - endpoint=endpoint + endpoint=endpoint, ) context.set_trailing_metadata(metadata.items()) return response - except InternalNetworkError as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism + except ( + InternalNetworkError + ) as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism return self._handle_internalnetworkerror( err=err, context=context, response=Response() ) except ( - RuntimeError, - Exception, + RuntimeError, + Exception, ) as ex: # some other error, keep streaming going just add error info self.logger.error( - f'{ex!r}' + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) requests[0].add_exception(ex, executor=None) @@ -501,7 +524,9 @@ async def endpoint_discovery(self, empty, context) -> jina_pb2.EndpointsProto: ) response.endpoints.extend(worker_response.endpoints) response.schemas.update(worker_response.schemas) - except InternalNetworkError as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism + except ( + InternalNetworkError + ) as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism return self._handle_internalnetworkerror( err=err, context=context, response=response ) @@ -526,7 +551,7 @@ async def _status(self, empty, context) -> jina_pb2.JinaInfoProto: return infoProto async def stream( - self, request_iterator, context=None, *args, **kwargs + self, request_iterator, context=None, *args, **kwargs ) -> AsyncIterator['Request']: """ stream requests from client iterator and stream responses back. 
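The endpoint-discovery task reformatted above follows a common asyncio shape: retry an RPC until it yields endpoints, then set a stop event so waiters can proceed. A stripped-down sketch of that shape, with fetch and interval as illustrative stand-ins rather than Jina APIs:

import asyncio

async def poll_until_ready(fetch, stop_event: asyncio.Event, interval: float = 0.1):
    while not stop_event.is_set():
        try:
            result = await fetch()        # stands in for send_discover_endpoint
            if result is not None:
                stop_event.set()          # signal completion, ending the loop
                return result
        except Exception:
            pass                          # the real task logs and retries
        await asyncio.sleep(interval)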
diff --git a/jina/serve/runtimes/monitoring.py b/jina/serve/runtimes/monitoring.py index c4a5ec734e45f..28bdcae1b5015 100644 --- a/jina/serve/runtimes/monitoring.py +++ b/jina/serve/runtimes/monitoring.py @@ -31,9 +31,7 @@ def _setup_monitoring(self, monitoring: bool, port_monitoring: Union[int, str]): if monitoring: from prometheus_client import start_http_server - start_http_server( - int(port_monitoring), registry=self.metrics_registry - ) + start_http_server(int(port_monitoring), registry=self.metrics_registry) class MonitoringRequestMixin: diff --git a/jina/serve/runtimes/servers/__init__.py b/jina/serve/runtimes/servers/__init__.py index 5cc03e8c11edc..fac284970f765 100644 --- a/jina/serve/runtimes/servers/__init__.py +++ b/jina/serve/runtimes/servers/__init__.py @@ -24,13 +24,13 @@ class BaseServer(MonitoringMixin, InstrumentationMixin): """ def __init__( - self, - name: Optional[str] = 'gateway', - runtime_args: Optional[Dict] = None, - req_handler_cls=None, - req_handler=None, - is_cancel=None, - **kwargs, + self, + name: Optional[str] = 'gateway', + runtime_args: Optional[Dict] = None, + req_handler_cls=None, + req_handler=None, + is_cancel=None, + **kwargs, ): self.name = name or '' self.runtime_args = runtime_args @@ -40,6 +40,7 @@ def __init__( except: # in some unit tests we instantiate the server without an asyncio Loop import threading + self.is_cancel = threading.Event() if isinstance(runtime_args, Dict): self.works_as_load_balancer = runtime_args.get( @@ -192,11 +193,11 @@ def __exit__(self, exc_type, exc_val, exc_tb): @staticmethod def is_ready( - ctrl_address: str, - protocol: Optional[str] = 'grpc', - timeout: float = 1.0, - logger=None, - **kwargs, + ctrl_address: str, + protocol: Optional[str] = 'grpc', + timeout: float = 1.0, + logger=None, + **kwargs, ) -> bool: """ Check if status is ready. @@ -219,11 +220,11 @@ def is_ready( @staticmethod async def async_is_ready( - ctrl_address: str, - protocol: Optional[str] = 'grpc', - timeout: float = 1.0, - logger=None, - **kwargs, + ctrl_address: str, + protocol: Optional[str] = 'grpc', + timeout: float = 1.0, + logger=None, + **kwargs, ) -> bool: """ Check if status is ready. 
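The monitoring hunk above collapses the start_http_server call onto one line; the call itself is plain prometheus_client usage, which starts a daemon thread serving /metrics on the given port. A minimal self-contained sketch (the port and metric name are arbitrary, not Jina's):

from prometheus_client import CollectorRegistry, Counter, start_http_server

registry = CollectorRegistry()
requests_seen = Counter('requests_seen', 'demo counter', registry=registry)
start_http_server(9090, registry=registry)  # serves /metrics in the background
requests_seen.inc()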
@@ -246,12 +247,14 @@ async def async_is_ready( @classmethod def wait_for_ready_or_shutdown( - cls, - timeout: Optional[float], - ready_or_shutdown_event: Union['multiprocessing.Event', 'threading.Event', 'asyncio.Event'], - ctrl_address: str, - health_check: bool = False, - **kwargs, + cls, + timeout: Optional[float], + ready_or_shutdown_event: Union[ + 'multiprocessing.Event', 'threading.Event', 'asyncio.Event' + ], + ctrl_address: str, + health_check: bool = False, + **kwargs, ): """ Check if the runtime has successfully started diff --git a/jina/serve/runtimes/servers/composite.py b/jina/serve/runtimes/servers/composite.py index 963e166e34d9e..757c4ca757e62 100644 --- a/jina/serve/runtimes/servers/composite.py +++ b/jina/serve/runtimes/servers/composite.py @@ -10,12 +10,13 @@ class CompositeBaseServer(BaseServer): """Composite Base Server implementation from which u can inherit a specific custom composite one""" + servers: List['BaseServer'] logger: 'JinaLogger' def __init__( - self, - **kwargs, + self, + **kwargs, ): """Initialize the gateway :param kwargs: keyword args @@ -41,7 +42,9 @@ def _server_kwargs(self): ) runtime_args.port = port runtime_args.protocol = protocol - server_kwargs = {k: v for k, v in self._kwargs.items() if k != 'runtime_args'} + server_kwargs = { + k: v for k, v in self._kwargs.items() if k != 'runtime_args' + } server_kwargs['runtime_args'] = dict(vars(runtime_args)) server_kwargs['req_handler'] = self._request_handler ret.append(server_kwargs) @@ -107,8 +110,8 @@ class CompositeServer(CompositeBaseServer): """Composite Server implementation""" def __init__( - self, - **kwargs, + self, + **kwargs, ): """Initialize the gateway :param kwargs: keyword args @@ -118,8 +121,10 @@ def __init__( self.servers: List[BaseServer] = [] for server_kwargs in self._server_kwargs: - server_cls = _get_gateway_class(server_kwargs['runtime_args']['protocol'], - works_as_load_balancer=self.works_as_load_balancer) + server_cls = _get_gateway_class( + server_kwargs['runtime_args']['protocol'], + works_as_load_balancer=self.works_as_load_balancer, + ) server = server_cls(**server_kwargs) self.servers.append(server) self.gateways = self.servers # for backwards compatibility diff --git a/jina/serve/runtimes/servers/grpc.py b/jina/serve/runtimes/servers/grpc.py index 9111fbe48621f..0f0bca545c97a 100644 --- a/jina/serve/runtimes/servers/grpc.py +++ b/jina/serve/runtimes/servers/grpc.py @@ -111,7 +111,9 @@ async def setup_server(self): jina_pb2.DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaDataRequestRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaGatewayDryRunRPC'].full_name, - jina_pb2.DESCRIPTOR.services_by_name['JinaSingleDocumentRequestRPC'].full_name, + jina_pb2.DESCRIPTOR.services_by_name[ + 'JinaSingleDocumentRequestRPC' + ].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaDiscoverEndpointsRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaInfoRPC'].full_name, reflection.SERVICE_NAME, diff --git a/jina/serve/runtimes/servers/load_balancer.py b/jina/serve/runtimes/servers/load_balancer.py index 371ad4de80188..3e20d68ff154c 100644 --- a/jina/serve/runtimes/servers/load_balancer.py +++ b/jina/serve/runtimes/servers/load_balancer.py @@ -7,10 +7,7 @@ class LoadBalancingServer(BaseServer): implementing the `app` property. This property should return a fastapi app. 
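_server_kwargs in composite.py above builds one kwargs dict per sub-server by copying everything except runtime_args and swapping in a per-protocol variant. A minimal sketch of that split, with illustrative values:

base_kwargs = {'name': 'gateway', 'runtime_args': {'port': 8080}, 'req_handler': None}

server_kwargs = {k: v for k, v in base_kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = {'port': 8081, 'protocol': 'http'}  # per-server copy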
The base Gateway will handle starting a server and serving the application using that server.""" - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): """Initialize the LoadBalancingServer :param kwargs: keyword args """ diff --git a/jina/serve/runtimes/servers/websocket.py b/jina/serve/runtimes/servers/websocket.py index 726d626956efd..183f2eae6dc12 100644 --- a/jina/serve/runtimes/servers/websocket.py +++ b/jina/serve/runtimes/servers/websocket.py @@ -12,12 +12,12 @@ class WebSocketServer(BaseServer): """WebSocket Server implementation""" def __init__( - self, - ssl_keyfile: Optional[str] = None, - ssl_certfile: Optional[str] = None, - uvicorn_kwargs: Optional[dict] = None, - proxy: Optional[bool] = None, - **kwargs + self, + ssl_keyfile: Optional[str] = None, + ssl_certfile: Optional[str] = None, + uvicorn_kwargs: Optional[dict] = None, + proxy: Optional[bool] = None, + **kwargs, ): """Initialize the gateway :param ssl_keyfile: the path to the key file @@ -42,11 +42,18 @@ async def setup_server(self): """ self.logger.debug(f'Setting up Websocket server') if docarray_v2: - from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler + from jina.serve.runtimes.gateway.request_handling import ( + GatewayRequestHandler, + ) + if isinstance(self._request_handler, GatewayRequestHandler): - await self._request_handler.streamer._get_endpoints_input_output_models(is_cancel=self.is_cancel) + await self._request_handler.streamer._get_endpoints_input_output_models( + is_cancel=self.is_cancel + ) self._request_handler.streamer._validate_flow_docarray_compatibility() - self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider) + self.app = self._request_handler._websocket_fastapi_default_app( + tracing=self.tracing, tracer_provider=self.tracer_provider + ) with ImportExtensions(required=True): from uvicorn import Config, Server diff --git a/jina/serve/runtimes/worker/batch_queue.py b/jina/serve/runtimes/worker/batch_queue.py index bde16beb151c9..530f5f58d3a81 100644 --- a/jina/serve/runtimes/worker/batch_queue.py +++ b/jina/serve/runtimes/worker/batch_queue.py @@ -73,13 +73,10 @@ def _cancel_timer_if_pending(self): def _start_timer(self): self._cancel_timer_if_pending() - self._timer_task = asyncio.create_task( - self._sleep_then_set() - ) + self._timer_task = asyncio.create_task(self._sleep_then_set()) async def _sleep_then_set(self): - """Sleep and then set the event - """ + """Sleep and then set the event""" self._timer_finished = False await asyncio.sleep(self._timeout / 1000) self._flush_trigger.set() @@ -275,7 +272,9 @@ def batch(iterable_1, iterable_2, n=1): await request_full.put(exc) else: # We need to attribute the docs to their requests - non_assigned_to_response_docs.extend(batch_res_docs or docs_inner_batch) + non_assigned_to_response_docs.extend( + batch_res_docs or docs_inner_batch + ) non_assigned_to_response_request_idxs.extend(req_idxs) num_assigned_docs = await _assign_results( non_assigned_to_response_docs, diff --git a/jina/serve/runtimes/worker/http_sagemaker_app.py b/jina/serve/runtimes/worker/http_sagemaker_app.py index 12ef0d2bb476a..e44082afc57f2 100644 --- a/jina/serve/runtimes/worker/http_sagemaker_app.py +++ b/jina/serve/runtimes/worker/http_sagemaker_app.py @@ -78,6 +78,7 @@ def add_post_route( ): import json from typing import List, Type, Union + try: from typing import get_args, get_origin except ImportError: diff --git a/jina/serve/runtimes/worker/request_handling.py 
b/jina/serve/runtimes/worker/request_handling.py index 0849aaebb388d..2e095cb26da50 100644 --- a/jina/serve/runtimes/worker/request_handling.py +++ b/jina/serve/runtimes/worker/request_handling.py @@ -51,16 +51,16 @@ class WorkerRequestHandler: _KEY_RESULT = '__results__' def __init__( - self, - args: 'argparse.Namespace', - logger: 'JinaLogger', - metrics_registry: Optional['CollectorRegistry'] = None, - tracer_provider: Optional['trace.TracerProvider'] = None, - meter_provider: Optional['metrics.MeterProvider'] = None, - meter=None, - tracer=None, - deployment_name: str = '', - **kwargs, + self, + args: 'argparse.Namespace', + logger: 'JinaLogger', + metrics_registry: Optional['CollectorRegistry'] = None, + tracer_provider: Optional['trace.TracerProvider'] = None, + meter_provider: Optional['metrics.MeterProvider'] = None, + meter=None, + tracer=None, + deployment_name: str = '', + **kwargs, ): """Initialize private parameters and execute private loading functions. @@ -83,8 +83,8 @@ def __init__( self._is_closed = False if self.metrics_registry: with ImportExtensions( - required=True, - help_text='You need to install the `prometheus_client` to use the montitoring functionality of jina', + required=True, + help_text='You need to install the `prometheus_client` to use the montitoring functionality of jina', ): from prometheus_client import Counter, Summary @@ -178,6 +178,7 @@ def call_handle(request): ] return self.process_single_data(request, None, is_generator=is_generator) + app = get_fastapi_app( request_models_map=request_models_map, caller=call_handle, **kwargs ) @@ -228,9 +229,9 @@ async def _hot_reload(self): watched_files.add(extra_python_file) with ImportExtensions( - required=True, - logger=self.logger, - help_text='''hot reload requires watchfiles dependency to be installed. You can do `pip install + required=True, + logger=self.logger, + help_text='''hot reload requires watchfiles dependency to be installed. You can do `pip install watchfiles''', ): from watchfiles import awatch @@ -297,14 +298,14 @@ def _init_batchqueue_dict(self): } def _init_monitoring( - self, - metrics_registry: Optional['CollectorRegistry'] = None, - meter: Optional['metrics.Meter'] = None, + self, + metrics_registry: Optional['CollectorRegistry'] = None, + meter: Optional['metrics.Meter'] = None, ): if metrics_registry: with ImportExtensions( - required=True, - help_text='You need to install the `prometheus_client` to use the montitoring functionality of jina', + required=True, + help_text='You need to install the `prometheus_client` to use the montitoring functionality of jina', ): from prometheus_client import Counter, Summary @@ -360,10 +361,10 @@ def _init_monitoring( self._sent_response_size_histogram = None def _load_executor( - self, - metrics_registry: Optional['CollectorRegistry'] = None, - tracer_provider: Optional['trace.TracerProvider'] = None, - meter_provider: Optional['metrics.MeterProvider'] = None, + self, + metrics_registry: Optional['CollectorRegistry'] = None, + tracer_provider: Optional['trace.TracerProvider'] = None, + meter_provider: Optional['metrics.MeterProvider'] = None, ): """ Load the executor to this runtime, specified by ``uses`` CLI argument. 
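The _hot_reload hunk above depends on watchfiles, as its help_text says. awatch is an async generator that yields a set of (Change, path) tuples on every batch of filesystem events; a hedged sketch of the loop shape (watch_and_reload and on_change are illustrative names, not Jina APIs):

import asyncio

async def watch_and_reload(paths, on_change):
    from watchfiles import awatch  # optional dependency, imported lazily

    async for changes in awatch(*paths):  # blocks until something changes
        for change, path in changes:
            on_change(path)

# asyncio.run(watch_and_reload(['config.yml'], print))  # illustrative usage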
@@ -577,8 +578,8 @@ def _setup_req_doc_array_cls(self, requests, exec_endpoint, is_response=False): req.document_array_cls = DocumentArray else: if ( - not endpoint_info.is_generator - and not endpoint_info.is_singleton_doc + not endpoint_info.is_generator + and not endpoint_info.is_singleton_doc ): req.document_array_cls = ( endpoint_info.request_schema @@ -595,9 +596,9 @@ def _setup_req_doc_array_cls(self, requests, exec_endpoint, is_response=False): pass def _setup_requests( - self, - requests: List['DataRequest'], - exec_endpoint: str, + self, + requests: List['DataRequest'], + exec_endpoint: str, ): """Execute a request using the executor. @@ -613,7 +614,7 @@ def _setup_requests( return requests, params async def handle_generator( - self, requests: List['DataRequest'], tracing_context: Optional['Context'] = None + self, requests: List['DataRequest'], tracing_context: Optional['Context'] = None ) -> Generator: """Prepares and executes a request for generator endpoints. @@ -648,7 +649,7 @@ async def handle_generator( ) async def handle( - self, requests: List['DataRequest'], tracing_context: Optional['Context'] = None + self, requests: List['DataRequest'], tracing_context: Optional['Context'] = None ) -> DataRequest: """Initialize private parameters and execute private loading functions. @@ -679,8 +680,12 @@ async def handle( if param_key not in self._batchqueue_instances[exec_endpoint]: self._batchqueue_instances[exec_endpoint][param_key] = BatchQueue( functools.partial(self._executor.__acall__, exec_endpoint), - request_docarray_cls=self._executor.requests[exec_endpoint].request_schema, - response_docarray_cls=self._executor.requests[exec_endpoint].response_schema, + request_docarray_cls=self._executor.requests[ + exec_endpoint + ].request_schema, + response_docarray_cls=self._executor.requests[ + exec_endpoint + ].response_schema, output_array_type=self.args.output_array_type, params=params, **self._batchqueue_config[exec_endpoint], @@ -722,7 +727,7 @@ async def handle( @staticmethod def replace_docs( - request: List['DataRequest'], docs: 'DocumentArray', ndarray_type: str = None + request: List['DataRequest'], docs: 'DocumentArray', ndarray_type: str = None ) -> None: """Replaces the docs in a message with new Documents. @@ -770,7 +775,7 @@ async def close(self): @staticmethod def _get_docs_matrix_from_request( - requests: List['DataRequest'], + requests: List['DataRequest'], ) -> Tuple[Optional[List['DocumentArray']], Optional[Dict[str, 'DocumentArray']]]: """ Returns a docs matrix from a list of DataRequest objects. @@ -794,7 +799,7 @@ def _get_docs_matrix_from_request( @staticmethod def get_parameters_dict_from_request( - requests: List['DataRequest'], + requests: List['DataRequest'], ) -> 'Dict': """ Returns a parameters dict from a list of DataRequest objects. 
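handle() above creates one BatchQueue per (endpoint, params) pair, guarding with a membership check so each queue is built exactly once and reused afterwards. A minimal sketch of that lazy-creation pattern with a plain dict (list stands in for BatchQueue):

queues: dict = {}

def get_queue(endpoint: str, param_key: str, factory):
    per_endpoint = queues.setdefault(endpoint, {})
    if param_key not in per_endpoint:  # build only on first use
        per_endpoint[param_key] = factory()
    return per_endpoint[param_key]

q = get_queue('/foo', 'default', list)  # same object on every later call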
@@ -814,7 +819,7 @@ def get_parameters_dict_from_request( @staticmethod def get_docs_from_request( - requests: List['DataRequest'], + requests: List['DataRequest'], ) -> 'DocumentArray': """ Gets a field from the message @@ -894,7 +899,7 @@ def reduce_requests(requests: List['DataRequest']) -> 'DataRequest': # serving part async def process_single_data( - self, request: DataRequest, context, is_generator: bool = False + self, request: DataRequest, context, is_generator: bool = False ) -> DataRequest: """ Process the received requests and return the result as a new request @@ -908,7 +913,7 @@ async def process_single_data( return await self.process_data([request], context, is_generator=is_generator) async def stream_doc( - self, request: SingleDocumentRequest, context: 'grpc.aio.ServicerContext' + self, request: SingleDocumentRequest, context: 'grpc.aio.ServicerContext' ) -> SingleDocumentRequest: """ Process the received requests and return the result as a new request, used for streaming behavior, one doc IN, several out @@ -935,9 +940,12 @@ async def stream_doc( if not is_generator: ex = ValueError('endpoint must be generator') self.logger.error( - f'{ex!r}' + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) request.add_exception(ex) @@ -969,10 +977,12 @@ async def stream_doc( f'output document type {doc.__class__.__name__} does not match the endpoint output type {request_endpoint.response_schema.__name__}' ) self.logger.error( - f'{ex!r}' - + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) req = SingleDocumentRequest() @@ -1034,7 +1044,7 @@ async def endpoint_discovery(self, empty, context) -> jina_pb2.EndpointsProto: return endpoints_proto def _extract_tracing_context( - self, metadata: 'grpc.aio.Metadata' + self, metadata: 'grpc.aio.Metadata' ) -> Optional['Context']: if self.tracer: from opentelemetry.propagate import extract @@ -1045,7 +1055,7 @@ def _extract_tracing_context( return None async def process_data( - self, requests: List[DataRequest], context, is_generator: bool = False + self, requests: List[DataRequest], context, is_generator: bool = False ) -> DataRequest: """ Process the received requests and return the result as a new request @@ -1057,7 +1067,7 @@ async def process_data( """ self.logger.debug('recv a process_data request') with MetricsTimer( - self._summary, self._receiving_request_seconds, self._metric_attributes + self._summary, self._receiving_request_seconds, self._metric_attributes ): try: if self.logger.debug_enabled: @@ -1095,10 +1105,12 @@ async def process_data( return result except (RuntimeError, Exception) as ex: self.logger.error( - f'{ex!r}' - + f'\n add "--quiet-error" to suppress the exception details' - if not self.args.quiet_error - else '', + ( + f'{ex!r}' + + f'\n add "--quiet-error" to suppress the exception details' + if not self.args.quiet_error + else '' + ), exc_info=not self.args.quiet_error, ) @@ -1113,8 +1125,8 @@ async def process_data( ) if ( - self.args.exit_on_exceptions - and type(ex).__name__ in self.args.exit_on_exceptions + self.args.exit_on_exceptions + and type(ex).__name__ in 
self.args.exit_on_exceptions ): self.logger.info('Exiting because of "--exit-on-exceptions".') raise RuntimeTerminated @@ -1138,7 +1150,7 @@ async def _status(self, empty, context) -> jina_pb2.JinaInfoProto: return info_proto async def stream( - self, request_iterator, context=None, *args, **kwargs + self, request_iterator, context=None, *args, **kwargs ) -> AsyncIterator['Request']: """ stream requests from client iterator and stream responses back. @@ -1156,8 +1168,8 @@ async def stream( Call = stream def _create_snapshot_status( - self, - snapshot_directory: str, + self, + snapshot_directory: str, ) -> 'jina_pb2.SnapshotStatusProto': _id = str(uuid.uuid4()) self.logger.debug(f'Generated snapshot id: {_id}') @@ -1170,7 +1182,7 @@ def _create_snapshot_status( ) def _create_restore_status( - self, + self, ) -> 'jina_pb2.SnapshotStatusProto': _id = str(uuid.uuid4()) self.logger.debug(f'Generated restore id: {_id}') @@ -1189,9 +1201,9 @@ async def snapshot(self, request, context) -> 'jina_pb2.SnapshotStatusProto': """ self.logger.debug('Calling snapshot') if ( - self._snapshot - and self._snapshot_thread - and self._snapshot_thread.is_alive() + self._snapshot + and self._snapshot_thread + and self._snapshot_thread.is_alive() ): raise RuntimeError( f'A snapshot with id {self._snapshot.id.value} is currently in progress. Cannot start another.' @@ -1209,7 +1221,7 @@ async def snapshot(self, request, context) -> 'jina_pb2.SnapshotStatusProto': return self._snapshot async def snapshot_status( - self, request: 'jina_pb2.SnapshotId', context + self, request: 'jina_pb2.SnapshotId', context ) -> 'jina_pb2.SnapshotStatusProto': """ method to start a snapshot process of the Executor @@ -1271,7 +1283,7 @@ async def restore(self, request: 'jina_pb2.RestoreSnapshotCommand', context): return self._restore async def restore_status( - self, request, context + self, request, context ) -> 'jina_pb2.RestoreSnapshotStatusProto': """ method to start a snapshot process of the Executor diff --git a/jina/serve/stream/__init__.py b/jina/serve/stream/__init__.py index f7b064bd0ace4..03c488d78f114 100644 --- a/jina/serve/stream/__init__.py +++ b/jina/serve/stream/__init__.py @@ -152,7 +152,9 @@ async def stream_doc( f'Error while getting responses from deployments: {err.details()}' ) raise - except Exception as err: # HTTP and WS need different treatment further up the stack + except ( + Exception + ) as err: # HTTP and WS need different treatment further up the stack self.logger.error(f'Error while getting responses from deployments: {err}') raise err @@ -215,7 +217,9 @@ async def stream( f'Error while getting responses from deployments: {err.details()}' ) raise - except Exception as err: # HTTP and WS need different treatment further up the stack + except ( + Exception + ) as err: # HTTP and WS need different treatment further up the stack self.logger.error(f'Error while getting responses from deployments: {err}') raise err diff --git a/jina/serve/stream/helper.py b/jina/serve/stream/helper.py index 4252a44da8e75..a9d15922e7430 100644 --- a/jina/serve/stream/helper.py +++ b/jina/serve/stream/helper.py @@ -29,11 +29,11 @@ class AsyncRequestsIterator: """Iterator to allow async iteration of blocking/non-blocking iterator from the Client""" def __init__( - self, - iterator: Union[Iterator, AsyncIterator], - request_counter: Optional[_RequestsCounter] = None, - prefetch: int = 0, - iterate_sync_in_thread: bool = True, + self, + iterator: Union[Iterator, AsyncIterator], + request_counter: Optional[_RequestsCounter] = 
None, + prefetch: int = 0, + iterate_sync_in_thread: bool = True, ) -> None: """Async request iterator @@ -70,6 +70,7 @@ async def __anext__(self): """ if not self._iterate_sync_in_thread: + async def _get_next(): try: req = self.iterator.__next__() diff --git a/jina/types/request/data.py b/jina/types/request/data.py index 427a447854dcc..c3fd12822e8c1 100644 --- a/jina/types/request/data.py +++ b/jina/types/request/data.py @@ -25,9 +25,9 @@ class DataRequest(Request): class _DataContent: def __init__( - self, - content: 'jina_pb2.DataRequestProto.DataContentProto', - document_array_cls: Type[DocumentArray], + self, + content: 'jina_pb2.DataRequestProto.DataContentProto', + document_array_cls: Type[DocumentArray], ): self._content = content self._loaded_doc_array = None @@ -59,7 +59,7 @@ def docs(self, value: DocumentArray): self.set_docs_convert_arrays(value) def set_docs_convert_arrays( - self, value: DocumentArray, ndarray_type: Optional[str] = None + self, value: DocumentArray, ndarray_type: Optional[str] = None ): """Convert embedding and tensor to given type, then set DocumentArray :param value: a DocumentArray @@ -107,8 +107,8 @@ def docs_bytes(self, value: bytes): """ def __init__( - self, - request: Optional[RequestSourceType] = None, + self, + request: Optional[RequestSourceType] = None, ): self.buffer = None self._pb_body = None @@ -184,7 +184,7 @@ def is_decompressed_wo_data(self) -> bool: @property def proto_wo_data( - self, + self, ) -> Union['jina_pb2.DataRequestProtoWoData', 'jina_pb2.DataRequestProto']: """ Transform the current buffer to a :class:`jina_pb2.DataRequestProtoWoData` unless the full proto has already @@ -198,7 +198,7 @@ def proto_wo_data( @property def proto( - self, + self, ) -> Union['jina_pb2.DataRequestProto', 'jina_pb2.DataRequestProtoWoData']: """ Cast ``self`` to a :class:`jina_pb2.DataRequestProto` or a :class:`jina_pb2.DataRequestProto`. Laziness will be broken and serialization will be recomputed when calling. @@ -212,7 +212,7 @@ def proto( @property def proto_with_data( - self, + self, ) -> 'jina_pb2.DataRequestProto': """ Cast ``self`` to a :class:`jina_pb2.DataRequestProto`. Laziness will be broken and serialization will be recomputed when calling. @@ -309,6 +309,7 @@ def parameters(self, value: Dict): parameters = value if docarray_v2: from pydantic import BaseModel + if isinstance(value, BaseModel): parameters = dict(value) self.proto_wo_data.parameters.update(parameters) @@ -403,9 +404,9 @@ class SingleDocumentRequest(Request): class _DataContent: def __init__( - self, - content, - document_cls: Type['Document'], + self, + content, + document_cls: Type['Document'], ): self._content = content self._loaded_document = None @@ -417,9 +418,7 @@ def doc(self) -> 'Document': .. 
# noqa: DAR201""" if not self._loaded_document: - self._loaded_document = self.document_cls.from_protobuf( - self._content - ) + self._loaded_document = self.document_cls.from_protobuf(self._content) return self._loaded_document @@ -434,8 +433,8 @@ def doc(self, value: 'Document'): self._content.CopyFrom(value.to_protobuf()) def __init__( - self, - request: Optional[jina_pb2.SingleDocumentRequestProto] = None, + self, + request: Optional[jina_pb2.SingleDocumentRequestProto] = None, ): self.buffer = None self._pb_body = None @@ -512,8 +511,10 @@ def is_decompressed_wo_data(self) -> bool: @property def proto_wo_data( - self, - ) -> Union['jina_pb2.DataRequestProtoWoData', 'jina_pb2.SingleDocumentRequestProto']: + self, + ) -> Union[ + 'jina_pb2.DataRequestProtoWoData', 'jina_pb2.SingleDocumentRequestProto' + ]: """ Transform the current buffer to a :class:`jina_pb2.DataRequestProtoWoData` unless the full proto has already been initialized or . Laziness will be broken and serialization will be recomputed when @@ -526,8 +527,10 @@ def proto_wo_data( @property def proto( - self, - ) -> Union['jina_pb2.SingleDocumentRequestProto', 'jina_pb2.DataRequestProtoWoData']: + self, + ) -> Union[ + 'jina_pb2.SingleDocumentRequestProto', 'jina_pb2.DataRequestProtoWoData' + ]: """ Cast ``self`` to a :class:`jina_pb2.DataRequestProto` or a :class:`jina_pb2.DataRequestProto`. Laziness will be broken and serialization will be recomputed when calling. it returns the underlying proto if it already exists (even if he is loaded without data) or creates a new one. @@ -540,7 +543,7 @@ def proto( @property def proto_with_data( - self, + self, ) -> 'jina_pb2.SingleDocumentRequestProto': """ Cast ``self`` to a :class:`jina_pb2.DataRequestProto`. Laziness will be broken and serialization will be recomputed when calling. 
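The proto_wo_data / proto / proto_with_data properties above all describe the same laziness contract: keep the serialized buffer until a caller forces parsing, cache the parsed body, and recompute serialization afterwards. A stripped-down sketch of that contract (LazyRequest and _parse are illustrative, not the Jina classes):

class LazyRequest:
    def __init__(self, buffer: bytes):
        self.buffer = buffer
        self._pb_body = None

    @property
    def proto(self):
        if self._pb_body is None:
            self._pb_body = self._parse(self.buffer)  # laziness is broken here
            self.buffer = None  # serialization must be recomputed from now on
        return self._pb_body

    @staticmethod
    def _parse(buffer: bytes):
        return {'raw': buffer}  # stand-in for protobuf deserialization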
@@ -663,6 +666,7 @@ def parameters(self, value: Dict): parameters = value if docarray_v2: from pydantic import BaseModel + if isinstance(value, BaseModel): parameters = dict(value) self.proto_wo_data.parameters.update(parameters) diff --git a/tests/conftest.py b/tests/conftest.py index 3a7d007bb4fd8..6f400bf936c37 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -89,9 +89,10 @@ def event_loop(request): yield loop loop.close() + @pytest.fixture(autouse=True) def set_test_pip_version() -> None: os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip' yield - if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed + if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed del os.environ['JINA_GATEWAY_IMAGE'] diff --git a/tests/docker_compose/test_deployment_docker_compose.py b/tests/docker_compose/test_deployment_docker_compose.py index 695614d5b4e9c..d784a49d73a7b 100644 --- a/tests/docker_compose/test_deployment_docker_compose.py +++ b/tests/docker_compose/test_deployment_docker_compose.py @@ -21,10 +21,10 @@ async def run_test(port, endpoint, num_docs=10, request_size=10, protocol='grpc' client.show_progress = True responses = [] async for resp in client.post( - endpoint, - inputs=[Document() for _ in range(num_docs)], - request_size=request_size, - return_responses=True, + endpoint, + inputs=[Document() for _ in range(num_docs)], + request_size=request_size, + return_responses=True, ): responses.append(resp) @@ -119,13 +119,20 @@ async def test_deployment_monitoring(tmpdir, docker_images, port_generator): indirect=True, ) @pytest.mark.parametrize('polling', ['ANY', 'ALL']) -async def test_deployment_with_replicas_with_sharding(deployment_with_replicas_with_sharding, polling, tmpdir): - dump_path = os.path.join(str(tmpdir), 'docker-compose-deployment-with-replicas-with-sharding.yml') +async def test_deployment_with_replicas_with_sharding( + deployment_with_replicas_with_sharding, polling, tmpdir +): + dump_path = os.path.join( + str(tmpdir), 'docker-compose-deployment-with-replicas-with-sharding.yml' + ) deployment_with_replicas_with_sharding.to_docker_compose_yaml(dump_path) with DockerComposeServices(dump_path): resp = await run_test( - port=deployment_with_replicas_with_sharding.port, endpoint='/debug', num_docs=10, request_size=1 + port=deployment_with_replicas_with_sharding.port, + endpoint='/debug', + num_docs=10, + request_size=1, ) assert len(resp) == 10 @@ -171,13 +178,20 @@ async def test_deployment_with_replicas_with_sharding(deployment_with_replicas_w indirect=True, ) @pytest.mark.parametrize('polling', ['ANY', 'ALL']) -async def test_deployment_without_replicas_with_sharding(deployment_without_replicas_with_sharding, polling, tmpdir): - dump_path = os.path.join(str(tmpdir), 'docker-compose-deployment-without-replicas-with-sharding.yml') +async def test_deployment_without_replicas_with_sharding( + deployment_without_replicas_with_sharding, polling, tmpdir +): + dump_path = os.path.join( + str(tmpdir), 'docker-compose-deployment-without-replicas-with-sharding.yml' + ) deployment_without_replicas_with_sharding.to_docker_compose_yaml(dump_path) with DockerComposeServices(dump_path): resp = await run_test( - port=deployment_without_replicas_with_sharding.port, endpoint='/debug', num_docs=10, request_size=1 + port=deployment_without_replicas_with_sharding.port, + endpoint='/debug', + num_docs=10, + request_size=1, ) assert len(resp) == 10 @@ -215,13 +229,20 @@ async def 
test_deployment_without_replicas_with_sharding(deployment_without_repl [['test-executor', 'jinaai/jina']], indirect=True, ) -async def test_deployment_with_replicas_without_sharding(deployment_with_replicas_without_sharding, tmpdir): - dump_path = os.path.join(str(tmpdir), 'docker-compose-deployment-with-replicas-without-sharding.yml') +async def test_deployment_with_replicas_without_sharding( + deployment_with_replicas_without_sharding, tmpdir +): + dump_path = os.path.join( + str(tmpdir), 'docker-compose-deployment-with-replicas-without-sharding.yml' + ) deployment_with_replicas_without_sharding.to_docker_compose_yaml(dump_path) with DockerComposeServices(dump_path): resp = await run_test( - port=deployment_with_replicas_without_sharding.port, endpoint='/debug', num_docs=10, request_size=1 + port=deployment_with_replicas_without_sharding.port, + endpoint='/debug', + num_docs=10, + request_size=1, ) assert len(resp) == 10 @@ -253,13 +274,20 @@ async def test_deployment_with_replicas_without_sharding(deployment_with_replica [['test-executor', 'jinaai/jina']], indirect=True, ) -async def test_deployment_without_replicas_without_sharding(deployment_without_replicas_without_sharding, tmpdir): - dump_path = os.path.join(str(tmpdir), 'docker-compose-deployment-without-replicas-without-sharding.yml') +async def test_deployment_without_replicas_without_sharding( + deployment_without_replicas_without_sharding, tmpdir +): + dump_path = os.path.join( + str(tmpdir), 'docker-compose-deployment-without-replicas-without-sharding.yml' + ) deployment_without_replicas_without_sharding.to_docker_compose_yaml(dump_path) with DockerComposeServices(dump_path): resp = await run_test( - port=deployment_without_replicas_without_sharding.port, endpoint='/debug', num_docs=10, request_size=1 + port=deployment_without_replicas_without_sharding.port, + endpoint='/debug', + num_docs=10, + request_size=1, ) assert len(resp) == 10 @@ -283,7 +311,6 @@ async def test_deployment_without_replicas_without_sharding(deployment_without_r assert len(runtimes_to_visit) == 0 - @pytest.mark.timeout(3600) @pytest.mark.asyncio @pytest.mark.parametrize( diff --git a/tests/integration/concurrent_clients/test_concurrent_clients.py b/tests/integration/concurrent_clients/test_concurrent_clients.py index 31b426cf8019a..546fe94da3314 100644 --- a/tests/integration/concurrent_clients/test_concurrent_clients.py +++ b/tests/integration/concurrent_clients/test_concurrent_clients.py @@ -23,10 +23,13 @@ def ping(self, **kwargs): @pytest.mark.parametrize('prefetch', [1, 10]) @pytest.mark.parametrize('concurrent', [15]) @pytest.mark.parametrize('use_stream', [False, True]) -def test_concurrent_clients(concurrent, protocol, shards, polling, prefetch, reraise, use_stream): +def test_concurrent_clients( + concurrent, protocol, shards, polling, prefetch, reraise, use_stream +): if not use_stream and protocol != 'grpc': return + def pong(peer_hash, queue, resp: Response): for d in resp.docs: queue.put((peer_hash, d.text)) @@ -39,7 +42,7 @@ def peer_client(port, protocol, peer_hash, queue): Document(text=peer_hash), on_done=lambda r: pong(peer_hash, queue, r), return_responses=True, - stream=use_stream + stream=use_stream, ) f = Flow(protocol=protocol, prefetch=prefetch).add( diff --git a/tests/integration/conditions_feature/test_condition_behavior.py b/tests/integration/conditions_feature/test_condition_behavior.py index d51ff23936ac0..f249adea867b9 100644 --- a/tests/integration/conditions_feature/test_condition_behavior.py +++ 
b/tests/integration/conditions_feature/test_condition_behavior.py @@ -12,7 +12,9 @@ class ConditionDumpExecutor(Executor): @requests def foo(self, docs, **kwargs): with open( - os.path.join(str(self.workspace), f'{self.metas.name}.txt'), 'w', encoding='utf-8' + os.path.join(str(self.workspace), f'{self.metas.name}.txt'), + 'w', + encoding='utf-8', ) as fp: for doc in docs: fp.write(doc.text) @@ -112,10 +114,14 @@ def test_conditions_filtering(tmpdir, flow): assert types_set == {1, 2} - with open(os.path.join(str(tmpdir), 'exec1', '0', f'exec1.txt'), 'r', encoding='utf-8') as fp: + with open( + os.path.join(str(tmpdir), 'exec1', '0', f'exec1.txt'), 'r', encoding='utf-8' + ) as fp: assert fp.read() == 'type1' - with open(os.path.join(str(tmpdir), 'exec2', '0', f'exec2.txt'), 'r', encoding='utf-8') as fp: + with open( + os.path.join(str(tmpdir), 'exec2', '0', f'exec2.txt'), 'r', encoding='utf-8' + ) as fp: assert fp.read() == 'type2' @@ -153,13 +159,15 @@ def test_conditions_filtering_on_joiner(tmpdir): with open( os.path.join(str(tmpdir), 'joiner_test_exec1', '0', f'joiner_test_exec1.txt'), - 'r', encoding='utf-8', + 'r', + encoding='utf-8', ) as fp: assert fp.read() == 'type1type2' with open( os.path.join(str(tmpdir), 'joiner_test_exec2', '0', f'joiner_test_exec2.txt'), - 'r', encoding='utf-8', + 'r', + encoding='utf-8', ) as fp: assert fp.read() == 'type1type2' diff --git a/tests/integration/deployments/test_deployment.py b/tests/integration/deployments/test_deployment.py index f28ef122223e6..6c887687aa97a 100644 --- a/tests/integration/deployments/test_deployment.py +++ b/tests/integration/deployments/test_deployment.py @@ -373,8 +373,7 @@ async def async_inputs(): class DummyExecutor(Executor): @requests(on='/foo') - def foo(self, docs, **kwargs): - ... + def foo(self, docs, **kwargs): ... 
@pytest.mark.parametrize( @@ -490,14 +489,14 @@ class PIDExecutor(Executor): @requests def foo(self, docs, **kwargs): import os + for doc in docs: doc.tags['pid'] = os.getpid() - dep = Deployment(uses=PIDExecutor, shards=shards, replicas=replicas) with dep: docs = dep.post(on='/', inputs=DocumentArray.empty(20), request_size=1) returned_pids = set([doc.tags['pid'] for doc in docs]) - assert len(returned_pids) == shards * replicas \ No newline at end of file + assert len(returned_pids) == shards * replicas diff --git a/tests/integration/distributed-replicas/test_distributed_replicas.py b/tests/integration/distributed-replicas/test_distributed_replicas.py index 50d66fdfeded1..fd676344d3ee9 100644 --- a/tests/integration/distributed-replicas/test_distributed_replicas.py +++ b/tests/integration/distributed-replicas/test_distributed_replicas.py @@ -132,7 +132,9 @@ def test_distributed_replicas_hosts_mismatch(input_docs): 'use_stream', [True, False], ) -def test_distributed_replicas_host_parsing(input_docs, hosts_as_list, ports_as_list, use_stream): +def test_distributed_replicas_host_parsing( + input_docs, hosts_as_list, ports_as_list, use_stream +): port1, port2 = random_port(), random_port() args1, args2 = _external_deployment_args( num_shards=1, port=port1 diff --git a/tests/integration/docarray_v2/docker/executor1/executor.py b/tests/integration/docarray_v2/docker/executor1/executor.py index 6d1696987e405..315dfff3290c9 100644 --- a/tests/integration/docarray_v2/docker/executor1/executor.py +++ b/tests/integration/docarray_v2/docker/executor1/executor.py @@ -4,6 +4,7 @@ from jina import Executor, requests import numpy as np + class MyDoc(BaseDoc): text: str embedding: Optional[NdArray] = None @@ -11,9 +12,9 @@ class MyDoc(BaseDoc): class Encoder(Executor): def __init__( - self, - *args, - **kwargs, + self, + *args, + **kwargs, ): super().__init__(*args, **kwargs) diff --git a/tests/integration/docarray_v2/docker/test_with_docker.py b/tests/integration/docarray_v2/docker/test_with_docker.py index 815c4f794764c..91238a288569f 100644 --- a/tests/integration/docarray_v2/docker/test_with_docker.py +++ b/tests/integration/docarray_v2/docker/test_with_docker.py @@ -37,21 +37,29 @@ class MyDocWithMatches(MyDoc): matches: DocList[MyDoc] = [] scores: List[float] = [] - f = Flow(protocol=protocol).add(uses='docker://encoder-executor').add(uses='docker://indexer-executor') + f = ( + Flow(protocol=protocol) + .add(uses='docker://encoder-executor') + .add(uses='docker://indexer-executor') + ) with f: if protocol == 'http': resp = general_requests.get(f'http://localhost:{f.port}/openapi.json') resp.json() - sentences = ['This framework generates embeddings for each input sentence', - 'Sentences are passed as a list of string.', - 'The quick brown fox jumps over the lazy dog.'] + sentences = [ + 'This framework generates embeddings for each input sentence', + 'Sentences are passed as a list of string.', + 'The quick brown fox jumps over the lazy dog.', + ] inputs = DocList[MyDoc]([MyDoc(text=sentence) for sentence in sentences]) f.post(on='/index', inputs=inputs) queries = inputs[0:2] - search_results = f.post(on='/search', inputs=queries, return_type=DocList[MyDocWithMatches]) + search_results = f.post( + on='/search', inputs=queries, return_type=DocList[MyDocWithMatches] + ) assert len(search_results) == len(queries) for result in search_results: diff --git a/tests/integration/docarray_v2/issues/github_6137/test_issue.py b/tests/integration/docarray_v2/issues/github_6137/test_issue.py index 
9dc91f9098963..00654cdc01a38 100644 --- a/tests/integration/docarray_v2/issues/github_6137/test_issue.py +++ b/tests/integration/docarray_v2/issues/github_6137/test_issue.py @@ -14,17 +14,31 @@ class SearchResult(BaseDoc): class InitialExecutor(Executor): @requests(on='/search') - async def search(self, docs: DocList[SearchResult], **kwargs) -> DocList[SearchResult]: + async def search( + self, docs: DocList[SearchResult], **kwargs + ) -> DocList[SearchResult]: return docs - f = ( - Flow(protocol='http') - .add(name='initial', uses=InitialExecutor) - ) + f = Flow(protocol='http').add(name='initial', uses=InitialExecutor) with f: - resp = f.post(on='/search', inputs=DocList[SearchResult]([SearchResult(results=DocList[QuoteFile]( - [QuoteFile(quote_file_id=999, texts=DocList[TextDoc]([TextDoc(text='hey here')]))]))]), - return_type=DocList[SearchResult]) + resp = f.post( + on='/search', + inputs=DocList[SearchResult]( + [ + SearchResult( + results=DocList[QuoteFile]( + [ + QuoteFile( + quote_file_id=999, + texts=DocList[TextDoc]([TextDoc(text='hey here')]), + ) + ] + ) + ) + ] + ), + return_type=DocList[SearchResult], + ) assert resp[0].results[0].quote_file_id == 999 assert resp[0].results[0].texts[0].text == 'hey here' diff --git a/tests/integration/docarray_v2/sagemaker/test_embedding.py b/tests/integration/docarray_v2/sagemaker/test_embedding.py index eb86a6e2178b2..a2233f0789dbe 100644 --- a/tests/integration/docarray_v2/sagemaker/test_embedding.py +++ b/tests/integration/docarray_v2/sagemaker/test_embedding.py @@ -32,9 +32,7 @@ def test_provider_sagemaker_pod_inference(): args, _ = set_pod_parser().parse_known_args( [ '--uses', - os.path.join( - os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), + os.path.join(os.path.dirname(__file__), "SampleExecutor", "config.yml"), '--provider', 'sagemaker', 'serve', # This is added by sagemaker @@ -73,9 +71,7 @@ def test_provider_sagemaker_pod_batch_transform_valid(filename): args, _ = set_pod_parser().parse_known_args( [ '--uses', - os.path.join( - os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), + os.path.join(os.path.dirname(__file__), "SampleExecutor", "config.yml"), '--provider', 'sagemaker', 'serve', # This is added by sagemaker @@ -88,10 +84,10 @@ def test_provider_sagemaker_pod_batch_transform_valid(filename): csv_data = f.read() for line in csv.reader( - io.StringIO(csv_data), - delimiter=",", - quoting=csv.QUOTE_NONE, - escapechar="\\", + io.StringIO(csv_data), + delimiter=",", + quoting=csv.QUOTE_NONE, + escapechar="\\", ): texts.append(line[1]) @@ -115,9 +111,7 @@ def test_provider_sagemaker_pod_batch_transform_invalid(): args, _ = set_pod_parser().parse_known_args( [ '--uses', - os.path.join( - os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), + os.path.join(os.path.dirname(__file__), "SampleExecutor", "config.yml"), '--provider', 'sagemaker', 'serve', # This is added by sagemaker @@ -126,7 +120,7 @@ def test_provider_sagemaker_pod_batch_transform_invalid(): with Pod(args): # Test `POST /invocations` endpoint for batch-transform with invalid input with open( - os.path.join(os.path.dirname(__file__), 'invalid_input.csv'), 'r' + os.path.join(os.path.dirname(__file__), 'invalid_input.csv'), 'r' ) as f: csv_data = f.read() @@ -140,17 +134,19 @@ def test_provider_sagemaker_pod_batch_transform_invalid(): ) assert resp.status_code == 400 assert ( - resp.json()['detail'] - == "Invalid CSV format. Line ['abcd'] doesn't match the expected field " - "order ['id', 'text']." 
+ resp.json()['detail'] + == "Invalid CSV format. Line ['abcd'] doesn't match the expected field " + "order ['id', 'text']." ) def test_provider_sagemaker_deployment_inference(): dep_port = random_port() - with Deployment(uses=os.path.join( - os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), provider='sagemaker', port=dep_port): + with Deployment( + uses=os.path.join(os.path.dirname(__file__), "SampleExecutor", "config.yml"), + provider='sagemaker', + port=dep_port, + ): # Test the `GET /ping` endpoint (added by jina for sagemaker) rsp = requests.get(f'http://localhost:{dep_port}/ping') assert rsp.status_code == 200 @@ -175,7 +171,7 @@ def test_provider_sagemaker_deployment_inference(): def test_provider_sagemaker_deployment_inference_docker(replica_docker_image_built): dep_port = random_port() with Deployment( - uses='docker://sampler-executor', provider='sagemaker', port=dep_port + uses='docker://sampler-executor', provider='sagemaker', port=dep_port ): # Test the `GET /ping` endpoint (added by jina for sagemaker) rsp = requests.get(f'http://localhost:{dep_port}/ping') @@ -201,13 +197,13 @@ def test_provider_sagemaker_deployment_inference_docker(replica_docker_image_bui @pytest.mark.skip('Sagemaker with Deployment for batch-transform is not supported yet') def test_provider_sagemaker_deployment_batch(): dep_port = random_port() - with Deployment(uses=os.path.join( - os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), provider='sagemaker', port=dep_port): + with Deployment( + uses=os.path.join(os.path.dirname(__file__), "SampleExecutor", "config.yml"), + provider='sagemaker', + port=dep_port, + ): # Test the `POST /invocations` endpoint for batch-transform - with open( - os.path.join(os.path.dirname(__file__), 'valid_input.csv'), 'r' - ) as f: + with open(os.path.join(os.path.dirname(__file__), 'valid_input.csv'), 'r') as f: csv_data = f.read() rsp = requests.post( @@ -229,7 +225,11 @@ def test_provider_sagemaker_deployment_wrong_port(): # Sagemaker executor would start on 8080. # If we use the same port for deployment, it should raise an error. 
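The batch-transform tests above parse request bodies with csv.reader configured for unquoted, backslash-escaped fields in (id, text) order. A self-contained sketch of that parsing convention on made-up data:

import csv
import io

csv_data = '1,hello\n2,world\n'  # illustrative rows in (id, text) order
texts = []
for line in csv.reader(
    io.StringIO(csv_data), delimiter=',', quoting=csv.QUOTE_NONE, escapechar='\\'
):
    texts.append(line[1])

assert texts == ['hello', 'world']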
with pytest.raises(ValueError): - with Deployment(uses=os.path.join( + with Deployment( + uses=os.path.join( os.path.dirname(__file__), "SampleExecutor", "config.yml" - ), provider='sagemaker', port=8080): + ), + provider='sagemaker', + port=8080, + ): pass diff --git a/tests/integration/docarray_v2/test_singleton.py b/tests/integration/docarray_v2/test_singleton.py index 24e9780b7887e..e8cd663eb10d5 100644 --- a/tests/integration/docarray_v2/test_singleton.py +++ b/tests/integration/docarray_v2/test_singleton.py @@ -8,7 +8,9 @@ @pytest.mark.parametrize('ctxt_manager', ['deployment', 'flow']) -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']] +) @pytest.mark.parametrize('return_type', ['batch', 'singleton']) @pytest.mark.parametrize('include_gateway', [True, False]) def test_singleton_return(ctxt_manager, protocols, return_type, include_gateway): @@ -28,28 +30,48 @@ class MySingletonReturnOutputDoc(BaseDoc): class MySingletonExecutorReturn(Executor): @requests(on='/foo') - def foo(self, docs: DocList[MySingletonReturnInputDoc], **kwargs) -> DocList[MySingletonReturnOutputDoc]: + def foo( + self, docs: DocList[MySingletonReturnInputDoc], **kwargs + ) -> DocList[MySingletonReturnOutputDoc]: return DocList[MySingletonReturnOutputDoc]( - [MySingletonReturnOutputDoc(text=docs[0].text + '_changed', category=str(docs[0].price + 1))]) + [ + MySingletonReturnOutputDoc( + text=docs[0].text + '_changed', category=str(docs[0].price + 1) + ) + ] + ) @requests(on='/foo_single') - def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonReturnOutputDoc: - return MySingletonReturnOutputDoc(text=doc.text + '_changed', category=str(doc.price + 1)) + def foo_single( + self, doc: MySingletonReturnInputDoc, **kwargs + ) -> MySingletonReturnOutputDoc: + return MySingletonReturnOutputDoc( + text=doc.text + '_changed', category=str(doc.price + 1) + ) ports = [random_port() for _ in protocols] if ctxt_manager == 'flow': ctxt = Flow(ports=ports, protocol=protocols).add(uses=MySingletonExecutorReturn) else: - ctxt = Deployment(ports=ports, protocol=protocols, uses=MySingletonExecutorReturn, - include_gateway=include_gateway) + ctxt = Deployment( + ports=ports, + protocol=protocols, + uses=MySingletonExecutorReturn, + include_gateway=include_gateway, + ) with ctxt: for port, protocol in zip(ports, protocols): c = Client(port=port, protocol=protocol) docs = c.post( - on='/foo', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ) if return_type == 'batch': assert docs[0].text == 'hello_changed' @@ -59,8 +81,13 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet assert docs.category == str(3) docs = c.post( - on='/foo_single', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo_single', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ) if return_type == 'batch': assert 
docs[0].text == 'hello_changed' @@ -71,7 +98,9 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet @pytest.mark.parametrize('ctxt_manager', ['deployment', 'flow']) -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']] +) @pytest.mark.parametrize('return_type', ['batch', 'singleton']) def test_singleton_return_async(ctxt_manager, protocols, return_type): if 'websocket' in protocols and ctxt_manager != 'flow': @@ -88,29 +117,47 @@ class MySingletonReturnOutputDoc(BaseDoc): class MySingletonExecutorReturn(Executor): @requests(on='/foo') - async def foo(self, docs: DocList[MySingletonReturnInputDoc], **kwargs) -> DocList[MySingletonReturnOutputDoc]: + async def foo( + self, docs: DocList[MySingletonReturnInputDoc], **kwargs + ) -> DocList[MySingletonReturnOutputDoc]: await asyncio.sleep(0.01) return DocList[MySingletonReturnOutputDoc]( - [MySingletonReturnOutputDoc(text=docs[0].text + '_changed', category=str(docs[0].price + 1))]) + [ + MySingletonReturnOutputDoc( + text=docs[0].text + '_changed', category=str(docs[0].price + 1) + ) + ] + ) @requests(on='/foo_single') - async def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonReturnOutputDoc: + async def foo_single( + self, doc: MySingletonReturnInputDoc, **kwargs + ) -> MySingletonReturnOutputDoc: await asyncio.sleep(0.01) - return MySingletonReturnOutputDoc(text=doc.text + '_changed', category=str(doc.price + 1)) + return MySingletonReturnOutputDoc( + text=doc.text + '_changed', category=str(doc.price + 1) + ) ports = [random_port() for _ in protocols] if ctxt_manager == 'flow': ctxt = Flow(ports=ports, protocol=protocols).add(uses=MySingletonExecutorReturn) else: - ctxt = Deployment(ports=ports, protocol=protocols, uses=MySingletonExecutorReturn) + ctxt = Deployment( + ports=ports, protocol=protocols, uses=MySingletonExecutorReturn + ) with ctxt: for port, protocol in zip(ports, protocols): c = Client(port=port, protocol=protocol) docs = c.post( - on='/foo', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ) if return_type == 'batch': assert docs[0].text == 'hello_changed' @@ -120,8 +167,13 @@ async def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingle assert docs.category == str(3) docs = c.post( - on='/foo_single', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo_single', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ) if return_type == 'batch': assert docs[0].text == 'hello_changed' @@ -132,7 +184,9 @@ async def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingle @pytest.mark.parametrize('ctxt_manager', ['deployment', 'flow']) -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']] +) 
@pytest.mark.parametrize('return_type', ['batch', 'singleton']) def test_singleton_in_place(ctxt_manager, protocols, return_type): if 'websocket' in protocols and ctxt_manager != 'flow': @@ -145,29 +199,42 @@ class MySingletonInPlaceDoc(BaseDoc): class MySingletonExecutorInPlace(Executor): @requests(on='/foo') - def foo(self, docs: DocList[MySingletonInPlaceDoc], **kwargs) -> DocList[MySingletonInPlaceDoc]: + def foo( + self, docs: DocList[MySingletonInPlaceDoc], **kwargs + ) -> DocList[MySingletonInPlaceDoc]: for doc in docs: doc.text = doc.text + '_changed' doc.price += 1 @requests(on='/foo_single') - def foo_single(self, doc: MySingletonInPlaceDoc, **kwargs) -> MySingletonInPlaceDoc: + def foo_single( + self, doc: MySingletonInPlaceDoc, **kwargs + ) -> MySingletonInPlaceDoc: doc.text = doc.text + '_changed' doc.price += 1 ports = [random_port() for _ in protocols] if ctxt_manager == 'flow': - ctxt = Flow(ports=ports, protocol=protocols).add(uses=MySingletonExecutorInPlace) + ctxt = Flow(ports=ports, protocol=protocols).add( + uses=MySingletonExecutorInPlace + ) else: - ctxt = Deployment(ports=ports, protocol=protocols, uses=MySingletonExecutorInPlace) + ctxt = Deployment( + ports=ports, protocol=protocols, uses=MySingletonExecutorInPlace + ) with ctxt: for port, protocol in zip(ports, protocols): c = Client(port=port, protocol=protocol) docs = c.post( - on='/foo', inputs=MySingletonInPlaceDoc(text='hello', price=2), - return_type=DocList[MySingletonInPlaceDoc] if return_type == 'batch' else MySingletonInPlaceDoc + on='/foo', + inputs=MySingletonInPlaceDoc(text='hello', price=2), + return_type=( + DocList[MySingletonInPlaceDoc] + if return_type == 'batch' + else MySingletonInPlaceDoc + ), ) if return_type == 'batch': assert docs[0].text == 'hello_changed' @@ -177,8 +244,13 @@ def foo_single(self, doc: MySingletonInPlaceDoc, **kwargs) -> MySingletonInPlace assert docs.price == 3 docs = c.post( - on='/foo_single', inputs=MySingletonInPlaceDoc(text='hello', price=2), - return_type=DocList[MySingletonInPlaceDoc] if return_type == 'batch' else MySingletonInPlaceDoc + on='/foo_single', + inputs=MySingletonInPlaceDoc(text='hello', price=2), + return_type=( + DocList[MySingletonInPlaceDoc] + if return_type == 'batch' + else MySingletonInPlaceDoc + ), ) if return_type == 'batch': assert docs[0].text == 'hello_changed' @@ -188,7 +260,9 @@ def foo_single(self, doc: MySingletonInPlaceDoc, **kwargs) -> MySingletonInPlace assert docs.price == 3 -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['http', 'grpc', 'websocket']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['http', 'grpc', 'websocket']] +) @pytest.mark.parametrize('return_type', ['batch', 'singleton']) def test_singleton_in_flow_in_the_middle(protocols, return_type): class MySingletonFlowDoc(BaseDoc): @@ -219,7 +293,9 @@ def foo(self, doc: MySingletonFlowDoc, **kwargs) -> MySingletonFlowDoc: class MyLastSingletonIntheMiddleExecutor(Executor): @requests - def foo(self, docs: DocList[MySingletonFlowDoc], **kwargs) -> DocList[OutputDoc]: + def foo( + self, docs: DocList[MySingletonFlowDoc], **kwargs + ) -> DocList[OutputDoc]: ret = DocList[OutputDoc]() for doc in docs: ret.append(OutputDoc(output=doc.num)) @@ -227,15 +303,20 @@ def foo(self, docs: DocList[MySingletonFlowDoc], **kwargs) -> DocList[OutputDoc] ports = [random_port() for _ in protocols] - flow = Flow(ports=ports, protocol=protocols).add(uses=MyFirstSingletonIntheMiddleExecutor).add( - 
uses=MySingletonIntheMiddleExecutor).add(uses=MyLastSingletonIntheMiddleExecutor) + flow = ( + Flow(ports=ports, protocol=protocols) + .add(uses=MyFirstSingletonIntheMiddleExecutor) + .add(uses=MySingletonIntheMiddleExecutor) + .add(uses=MyLastSingletonIntheMiddleExecutor) + ) with flow: for port, protocol in zip(ports, protocols): c = Client(port=port, protocol=protocol) docs = c.post( - on='/foo', inputs=InputDoc(input='hello'), - return_type=DocList[OutputDoc] if return_type == 'batch' else OutputDoc + on='/foo', + inputs=InputDoc(input='hello'), + return_type=DocList[OutputDoc] if return_type == 'batch' else OutputDoc, ) if return_type == 'batch': assert docs[0].output == 2 * len('hello') @@ -244,8 +325,11 @@ def foo(self, docs: DocList[MySingletonFlowDoc], **kwargs) -> DocList[OutputDoc] c = Client(port=port, protocol=protocol) docs = c.post( - on='/foo', inputs=DocList[InputDoc]([InputDoc(input='hello'), InputDoc(input='hello')]), - return_type=DocList[OutputDoc] if return_type == 'batch' else OutputDoc + on='/foo', + inputs=DocList[InputDoc]( + [InputDoc(input='hello'), InputDoc(input='hello')] + ), + return_type=DocList[OutputDoc] if return_type == 'batch' else OutputDoc, ) assert isinstance(docs, DocList[OutputDoc]) # I have sent 2 assert len(docs) == 2 @@ -253,7 +337,9 @@ def foo(self, docs: DocList[MySingletonFlowDoc], **kwargs) -> DocList[OutputDoc] assert doc.output == 2 * len('hello') -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['http', 'grpc', 'websocket']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['http', 'grpc', 'websocket']] +) def test_flow_incompatibility_with_singleton(protocols): class First(Executor): @requests @@ -289,26 +375,41 @@ class MySingletonReturnOutputDoc(BaseDoc): class MySingletonExecutorReturn(Executor): @requests(on='/foo') - def foo(self, docs: DocList[MySingletonReturnInputDoc], **kwargs) -> DocList[MySingletonReturnOutputDoc]: + def foo( + self, docs: DocList[MySingletonReturnInputDoc], **kwargs + ) -> DocList[MySingletonReturnOutputDoc]: ret = DocList[MySingletonReturnOutputDoc]() for doc in docs: - ret.append(MySingletonReturnOutputDoc(text=doc.text + '_changed', category=str(doc.price + 1))) + ret.append( + MySingletonReturnOutputDoc( + text=doc.text + '_changed', category=str(doc.price + 1) + ) + ) return ret @requests(on='/foo_single') - def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonReturnOutputDoc: - return MySingletonReturnOutputDoc(text=doc.text + '_changed', category=str(doc.price + 1)) + def foo_single( + self, doc: MySingletonReturnInputDoc, **kwargs + ) -> MySingletonReturnOutputDoc: + return MySingletonReturnOutputDoc( + text=doc.text + '_changed', category=str(doc.price + 1) + ) port = random_port() if ctxt_manager == 'flow': ctxt = Flow(port=port, protocol='http').add(uses=MySingletonExecutorReturn) else: - ctxt = Deployment(port=port, protocol='http', uses=MySingletonExecutorReturn, - include_gateway=include_gateway) + ctxt = Deployment( + port=port, + protocol='http', + uses=MySingletonExecutorReturn, + include_gateway=include_gateway, + ) with ctxt: import requests as global_requests + for endpoint in {'foo', 'foo_single'}: url = f'http://localhost:{port}/{endpoint}' myobj = {'data': {'text': 'hello', 'price': 2}} @@ -321,7 +422,9 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet resp_json = resp.json() assert resp_json['data'][0]['text'] == 'hello_changed' assert resp_json['data'][0]['category'] == str(3) - myobj = {'data': 
[{'text': 'hello', 'price': 2}, {'text': 'hello', 'price': 2}]} + myobj = { + 'data': [{'text': 'hello', 'price': 2}, {'text': 'hello', 'price': 2}] + } resp = global_requests.post(url, json=myobj) resp_json = resp.json() assert len(resp_json['data']) == 2 @@ -332,18 +435,21 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet def test_invalid_singleton_batch_combination(): with pytest.raises(Exception): + class Invalid1(Executor): @requests def foo(self, doc: ImageDoc, **kwargs) -> DocList[ImageDoc]: pass with pytest.raises(Exception): + class Invalid2(Executor): @requests async def foo(self, doc: ImageDoc, **kwargs) -> DocList[ImageDoc]: pass with pytest.raises(Exception): + class Invalid3(Executor): @requests async def foo(self, doc: ImageDoc, **kwargs) -> DocList[ImageDoc]: @@ -351,20 +457,25 @@ async def foo(self, doc: ImageDoc, **kwargs) -> DocList[ImageDoc]: yield doc with pytest.raises(Exception): + class Invalid4(Executor): @requests def foo(self, docs: DocList[ImageDoc], **kwargs) -> ImageDoc: pass with pytest.raises(Exception): + class Invalid6(Executor): @requests async def foo(self, docs: DocList[ImageDoc], **kwargs) -> ImageDoc: pass + @pytest.mark.asyncio @pytest.mark.parametrize('ctxt_manager', ['deployment', 'flow']) -@pytest.mark.parametrize('protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']]) +@pytest.mark.parametrize( + 'protocols', [['grpc'], ['http'], ['websocket'], ['grpc', 'http']] +) @pytest.mark.parametrize('return_type', ['batch', 'singleton']) @pytest.mark.parametrize('include_gateway', [True, False]) async def test_async_client(ctxt_manager, protocols, return_type, include_gateway): @@ -384,29 +495,49 @@ class MySingletonReturnOutputDoc(BaseDoc): class MySingletonExecutorReturn(Executor): @requests(on='/foo') - def foo(self, docs: DocList[MySingletonReturnInputDoc], **kwargs) -> DocList[MySingletonReturnOutputDoc]: + def foo( + self, docs: DocList[MySingletonReturnInputDoc], **kwargs + ) -> DocList[MySingletonReturnOutputDoc]: return DocList[MySingletonReturnOutputDoc]( - [MySingletonReturnOutputDoc(text=docs[0].text + '_changed', category=str(docs[0].price + 1))]) + [ + MySingletonReturnOutputDoc( + text=docs[0].text + '_changed', category=str(docs[0].price + 1) + ) + ] + ) @requests(on='/foo_single') - def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonReturnOutputDoc: - return MySingletonReturnOutputDoc(text=doc.text + '_changed', category=str(doc.price + 1)) + def foo_single( + self, doc: MySingletonReturnInputDoc, **kwargs + ) -> MySingletonReturnOutputDoc: + return MySingletonReturnOutputDoc( + text=doc.text + '_changed', category=str(doc.price + 1) + ) ports = [random_port() for _ in protocols] if ctxt_manager == 'flow': ctxt = Flow(ports=ports, protocol=protocols).add(uses=MySingletonExecutorReturn) else: - ctxt = Deployment(ports=ports, protocol=protocols, uses=MySingletonExecutorReturn, - include_gateway=include_gateway) + ctxt = Deployment( + ports=ports, + protocol=protocols, + uses=MySingletonExecutorReturn, + include_gateway=include_gateway, + ) with ctxt: for port, protocol in zip(ports, protocols): c = Client(port=port, protocol=protocol, asyncio=True) async for doc in c.post( - on='/foo', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + 
DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ): if return_type == 'batch': assert isinstance(doc, DocList) @@ -419,8 +550,13 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet assert doc.category == str(3) async for doc in c.post( - on='/foo_single', inputs=MySingletonReturnInputDoc(text='hello', price=2), return_type=DocList[ - MySingletonReturnOutputDoc] if return_type == 'batch' else MySingletonReturnOutputDoc + on='/foo_single', + inputs=MySingletonReturnInputDoc(text='hello', price=2), + return_type=( + DocList[MySingletonReturnOutputDoc] + if return_type == 'batch' + else MySingletonReturnOutputDoc + ), ): if return_type == 'batch': assert isinstance(doc, DocList) @@ -431,4 +567,3 @@ def foo_single(self, doc: MySingletonReturnInputDoc, **kwargs) -> MySingletonRet assert isinstance(doc, BaseDoc) assert doc.text == 'hello_changed' assert doc.category == str(3) - diff --git a/tests/integration/docarray_v2/test_v2.py b/tests/integration/docarray_v2/test_v2.py index a40f1d745be1f..eebbafd1a572f 100644 --- a/tests/integration/docarray_v2/test_v2.py +++ b/tests/integration/docarray_v2/test_v2.py @@ -977,11 +977,13 @@ def foo(self, docs: DocList[ImageDoc], **kwargs) -> DocList[ImageDoc]: @pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket']) def test_flow_compatible_different_exact_schema(protocol): from pydantic import Field + class MyFirstDoc(BaseDoc): a: str = Field(default='My first default') class MySecondDoc(BaseDoc): a: str = Field(default='My second default') + class FirstTestComp(Executor): @requests def foo(self, docs: DocList[MyFirstDoc], **kwargs) -> DocList[MyFirstDoc]: @@ -1616,6 +1618,7 @@ def generate( @pytest.mark.repeat(10) def test_exception_handling_in_dynamic_batch(): from jina.proto import jina_pb2 + class DummyEmbeddingDoc(BaseDoc): lf: List[float] = [] @@ -1636,11 +1639,18 @@ def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[DummyEmbeddingDoc]: with depl: da = DocList[TextDoc]([TextDoc(text=f'good-{i}') for i in range(50)]) da[4].text = 'fail' - responses = depl.post(on='/foo', inputs=da, request_size=1, return_responses=True, continue_on_error=True, results_in_order=True) + responses = depl.post( + on='/foo', + inputs=da, + request_size=1, + return_responses=True, + continue_on_error=True, + results_in_order=True, + ) assert len(responses) == 50 # 1 request per input num_failed_requests = 0 for r in responses: if r.header.status.code == jina_pb2.StatusProto.StatusCode.ERROR: num_failed_requests += 1 - assert 1 <= num_failed_requests <= 3 # 3 requests in the dynamic batch failing \ No newline at end of file + assert 1 <= num_failed_requests <= 3 # 3 requests in the dynamic batch failing diff --git a/tests/integration/dynamic_batching/test_dynamic_batching.py b/tests/integration/dynamic_batching/test_dynamic_batching.py index b21769e09fddd..90126a82700f5 100644 --- a/tests/integration/dynamic_batching/test_dynamic_batching.py +++ b/tests/integration/dynamic_batching/test_dynamic_batching.py @@ -573,7 +573,8 @@ def run(args): with mp.Pool(3) as p: results = [ p.apply_async( - _assert_all_docs_processed, (args.port[0], req_size, '/long_timeout') + _assert_all_docs_processed, + (args.port[0], req_size, '/long_timeout'), ) for req_size in [10, 20, 30] ] @@ -644,11 +645,18 @@ def foo(self, docs, **kwargs): with depl: da = DocumentArray([Document(text='good') for _ in range(50)]) da[4].text = 'fail' - responses = depl.post(on='/foo', inputs=da, request_size=1, 
return_responses=True, continue_on_error=True, results_in_order=True) + responses = depl.post( + on='/foo', + inputs=da, + request_size=1, + return_responses=True, + continue_on_error=True, + results_in_order=True, + ) assert len(responses) == 50 # 1 request per input num_failed_requests = 0 for r in responses: if r.header.status.code == jina_pb2.StatusProto.StatusCode.ERROR: num_failed_requests += 1 - assert 1 <= num_failed_requests <= 3 # 3 requests in the dynamic batch failing + assert 1 <= num_failed_requests <= 3 # 3 requests in the dynamic batch failing diff --git a/tests/integration/floating_deployments/test_floating_deployments.py b/tests/integration/floating_deployments/test_floating_deployments.py index 3ff321ae96195..665d5aad75a3d 100644 --- a/tests/integration/floating_deployments/test_floating_deployments.py +++ b/tests/integration/floating_deployments/test_floating_deployments.py @@ -32,8 +32,8 @@ def test_floating_executors(tmpdir, protocol): f = ( Flow(protocol=protocol) - .add(name='first') - .add( + .add(name='first') + .add( name='second', floating=True, uses=FloatingTestExecutor, @@ -47,8 +47,8 @@ def test_floating_executors(tmpdir, protocol): ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1)) end_time = time.time() assert ( - end_time - start_time - ) < TIME_SLEEP_FLOATING # check that the response arrives before the + end_time - start_time + ) < TIME_SLEEP_FLOATING # check that the response arrives before the # Floating Executor finishes assert len(ret) == 1 assert ret[0].text == '' @@ -67,8 +67,8 @@ def test_floating_executors_right_after_gateway(tmpdir, protocol): f = ( Flow(protocol=protocol) - .add(name='first') - .add( + .add(name='first') + .add( name='second', floating=True, uses=FloatingTestExecutor, @@ -83,8 +83,8 @@ def test_floating_executors_right_after_gateway(tmpdir, protocol): ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1)) end_time = time.time() assert ( - end_time - start_time - ) < TIME_SLEEP_FLOATING # check that the response arrives before the + end_time - start_time + ) < TIME_SLEEP_FLOATING # check that the response arrives before the # Floating Executor finishes assert len(ret) == 1 assert ret[0].text == '' @@ -104,14 +104,14 @@ def test_multiple_floating_points(tmpdir, protocol): f = ( Flow(protocol=protocol) - .add(name='first') - .add( + .add(name='first') + .add( name='second', floating=True, uses=FloatingTestExecutor, uses_with={'file_name': file_name1}, ) - .add( + .add( name='third', floating=True, uses=FloatingTestExecutor, @@ -125,8 +125,8 @@ def test_multiple_floating_points(tmpdir, protocol): ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1)) end_time = time.time() assert ( - end_time - start_time - ) < TIME_SLEEP_FLOATING # check that the response arrives before the + end_time - start_time + ) < TIME_SLEEP_FLOATING # check that the response arrives before the assert len(ret) == 1 assert ret[0].text == '' @@ -150,27 +150,27 @@ def test_complex_flow(tmpdir, protocol): f = ( Flow(protocol=protocol) - .add(name='pod0') - .add(name='pod4', needs=['gateway']) - .add( + .add(name='pod0') + .add(name='pod4', needs=['gateway']) + .add( name='floating_pod6', needs=['gateway'], floating=True, uses=FloatingTestExecutor, uses_with={'file_name': file_name2}, ) - .add( + .add( name='floating_pod1', needs=['pod0'], floating=True, uses=FloatingTestExecutor, uses_with={'file_name': file_name1}, ) - .add(name='pod2', needs=['pod0']) - .add(name='pod3', needs=['pod2']) - .add(name='pod5', 
needs=['pod4']) - .add(name='merger', needs=['pod5', 'pod3']) - .add(name='pod_last', needs=['merger']) + .add(name='pod2', needs=['pod0']) + .add(name='pod3', needs=['pod2']) + .add(name='pod5', needs=['pod4']) + .add(name='merger', needs=['pod5', 'pod3']) + .add(name='pod_last', needs=['merger']) ) with f: @@ -179,8 +179,8 @@ def test_complex_flow(tmpdir, protocol): ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1)) end_time = time.time() assert ( - end_time - start_time - ) < TIME_SLEEP_FLOATING # check that the response arrives before the + end_time - start_time + ) < TIME_SLEEP_FLOATING # check that the response arrives before the assert len(ret) == 1 assert ret[0].text == '' @@ -209,8 +209,8 @@ def foo(self, docs, **kwargs): f = ( Flow() - .add(name='executor0', uses=FastChangingExecutor) - .add( + .add(name='executor0', uses=FastChangingExecutor) + .add( name='floating_executor', uses=FloatingTestExecutor, uses_with={'file_name': file_name}, @@ -275,16 +275,16 @@ def foo(self, docs, **kwargs): f = ( Flow() - .add(name='executor0', uses=FastChangingExecutor) - .add(name='executor1', uses=FastAddExecutor, needs=['executor0']) - .add( + .add(name='executor0', uses=FastChangingExecutor) + .add(name='executor1', uses=FastAddExecutor, needs=['executor0']) + .add( name='floating_executor', uses=FloatingTestExecutorWriteDocs, uses_with={'file_name': file_name1}, needs=[needs], floating=True, ) - .add( + .add( name='floating_executor_2', uses=FloatingTestExecutorWriteDocs, uses_with={'file_name': file_name2}, @@ -328,8 +328,12 @@ def foo(self, docs, **kwargs): with open(self.file_name, 'a+', encoding='utf-8') as f: f.write(str(len(docs))) - flow = Flow(protocol=protocol).add(name='A', floating=True, uses=FloatingTestExecutorWriteDocs, - uses_with={'file_name': file_name}) + flow = Flow(protocol=protocol).add( + name='A', + floating=True, + uses=FloatingTestExecutorWriteDocs, + uses_with={'file_name': file_name}, + ) with flow: flow.post(on='/', inputs=DocumentArray.empty(1)) diff --git a/tests/integration/gateway_clients/test_clients_gateways.py b/tests/integration/gateway_clients/test_clients_gateways.py index 484fa84d39e05..ff73672cdde09 100644 --- a/tests/integration/gateway_clients/test_clients_gateways.py +++ b/tests/integration/gateway_clients/test_clients_gateways.py @@ -207,7 +207,8 @@ def decompress(self): '--protocol', protocol, ] - ), req_handler_cls=GatewayRequestHandler + ), + req_handler_cls=GatewayRequestHandler, ) as runtime: runtime.run_forever() diff --git a/tests/integration/hot_reload/test_hot_reload.py b/tests/integration/hot_reload/test_hot_reload.py index 1365f124599bc..aae377c24bbdc 100644 --- a/tests/integration/hot_reload/test_hot_reload.py +++ b/tests/integration/hot_reload/test_hot_reload.py @@ -122,16 +122,18 @@ def test_reload_with_inheritance(tmpdir): def test_reload_from_config(tmpdir): - f = Flow().add(uses=os.path.join(cur_dir, os.path.join('exec4', 'config.yml')), reload=True) + f = Flow().add( + uses=os.path.join(cur_dir, os.path.join('exec4', 'config.yml')), reload=True + ) with f: res = f.post(on='/', inputs=DocumentArray.empty(10)) assert len(res) == 10 for doc in res: assert doc.text == 'MyExecutorBeforeReload' with _update_file( - os.path.join(cur_dir, 'my_executor_4_new.py'), - os.path.join(cur_dir, 'exec4/my_executor4.py'), - str(tmpdir), + os.path.join(cur_dir, 'my_executor_4_new.py'), + os.path.join(cur_dir, 'exec4/my_executor4.py'), + str(tmpdir), ): res = f.post(on='/', inputs=DocumentArray.empty(10)) assert len(res) == 10 @@ 
-140,4 +142,4 @@ def test_reload_from_config(tmpdir): res = f.post(on='/', inputs=DocumentArray.empty(10)) assert len(res) == 10 for doc in res: - assert doc.text == 'MyExecutorBeforeReload' \ No newline at end of file + assert doc.text == 'MyExecutorBeforeReload' diff --git a/tests/integration/install_requirements/test_install_requirements.py b/tests/integration/install_requirements/test_install_requirements.py index 649937602af19..54cee9552b50b 100644 --- a/tests/integration/install_requirements/test_install_requirements.py +++ b/tests/integration/install_requirements/test_install_requirements.py @@ -5,7 +5,10 @@ def test_install_reqs(): - f = Flow().add(install_requirements=True, uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml')) + f = Flow().add( + install_requirements=True, + uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), + ) with f: resp = f.post(on='/', inputs=DocumentArray.empty(2)) diff --git a/tests/integration/issues/github_3124/test_cli_executor.py b/tests/integration/issues/github_3124/test_cli_executor.py index 7df4fa5224695..99c8f3d885663 100644 --- a/tests/integration/issues/github_3124/test_cli_executor.py +++ b/tests/integration/issues/github_3124/test_cli_executor.py @@ -28,4 +28,3 @@ def test_executor_cli_docker(docker_image): poll = process.poll() process.terminate() assert poll is None - diff --git a/tests/integration/issues/github_5543/test_reentrant_flows.py b/tests/integration/issues/github_5543/test_reentrant_flows.py index 1afd341e7c539..c484215a23e9a 100644 --- a/tests/integration/issues/github_5543/test_reentrant_flows.py +++ b/tests/integration/issues/github_5543/test_reentrant_flows.py @@ -7,5 +7,10 @@ def test_reentrant(use_stream): for _ in range(10): f = Flow().add() with f: - docs = f.post(on='/', inputs=DocumentArray.empty(100), request_size=1, stream=use_stream) + docs = f.post( + on='/', + inputs=DocumentArray.empty(100), + request_size=1, + stream=use_stream, + ) assert len(docs) == 100 diff --git a/tests/integration/monitoring/test_executor.py b/tests/integration/monitoring/test_executor.py index 18a388829f043..27cfab333ed72 100644 --- a/tests/integration/monitoring/test_executor.py +++ b/tests/integration/monitoring/test_executor.py @@ -38,12 +38,10 @@ def foo(self, docs, **kwargs): self.process_2(docs) @monitor(name='metrics_name', documentation='metrics description') - def _process(self, docs): - ... + def _process(self, docs): ... @monitor() - def process_2(self, docs): - ... + def process_2(self, docs): ... port = port_generator() with Flow(monitoring=True, port_monitoring=port_generator()).add( @@ -76,11 +74,9 @@ def foo(self, docs, **kwargs): ): self.process_2(docs) - def _process(self, docs): - ... + def _process(self, docs): ... - def process_2(self, docs): - ... + def process_2(self, docs): ... port = port_generator() with Flow(monitoring=True, port_monitoring=port_generator()).add( diff --git a/tests/integration/monitoring/test_monitoring.py b/tests/integration/monitoring/test_monitoring.py index d6f0bcf7ad8ba..1217b6497ddbb 100644 --- a/tests/integration/monitoring/test_monitoring.py +++ b/tests/integration/monitoring/test_monitoring.py @@ -11,12 +11,10 @@ def executor(): class DummyExecutor(Executor): @requests(on='/foo') - def foo(self, docs, **kwargs): - ... + def foo(self, docs, **kwargs): ... @requests(on='/bar') - def bar(self, docs, **kwargs): - ... + def bar(self, docs, **kwargs): ... 
return DummyExecutor diff --git a/tests/integration/monitoring/test_request_size.py b/tests/integration/monitoring/test_request_size.py index ecf0ca510d225..6a7d9ad0b15db 100644 --- a/tests/integration/monitoring/test_request_size.py +++ b/tests/integration/monitoring/test_request_size.py @@ -11,12 +11,10 @@ def executor(): class DummyExecutor(Executor): @requests(on='/foo') - def foo(self, docs, **kwargs): - ... + def foo(self, docs, **kwargs): ... @requests(on='/bar') - def bar(self, docs, **kwargs): - ... + def bar(self, docs, **kwargs): ... return DummyExecutor diff --git a/tests/integration/multiple_protocol_gateway/test_multiple_protocols_gateway.py b/tests/integration/multiple_protocol_gateway/test_multiple_protocols_gateway.py index 8d1de29d3df5c..ce4bc3dfcaee8 100644 --- a/tests/integration/multiple_protocol_gateway/test_multiple_protocols_gateway.py +++ b/tests/integration/multiple_protocol_gateway/test_multiple_protocols_gateway.py @@ -38,7 +38,9 @@ def multi_port_gateway_docker_image_built(): ], ) @pytest.mark.parametrize('use_stream', [False, True]) -def test_multiple_protocols_gateway(multi_port_gateway_docker_image_built, uses, use_stream): +def test_multiple_protocols_gateway( + multi_port_gateway_docker_image_built, uses, use_stream +): http_port = random_port() grpc_port = random_port() flow = Flow().config_gateway( diff --git a/tests/integration/network_failures/test_network_failures.py b/tests/integration/network_failures/test_network_failures.py index 71fab67bb04bd..288275f917b6c 100644 --- a/tests/integration/network_failures/test_network_failures.py +++ b/tests/integration/network_failures/test_network_failures.py @@ -13,7 +13,10 @@ from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler from tests.helper import _generate_pod_args -from tests.integration.runtimes.test_runtimes import _create_gateway_runtime, _create_head_runtime +from tests.integration.runtimes.test_runtimes import ( + _create_gateway_runtime, + _create_head_runtime, +) class DummyExec(Executor): @@ -489,7 +492,9 @@ async def test_runtimes_replicas( p.join() -@pytest.mark.parametrize('terminate_head', [True]) # option with False times out because backoffs accumulate +@pytest.mark.parametrize( + 'terminate_head', [True] +) # option with False times out because backoffs accumulate @pytest.mark.parametrize('protocol', ['http', 'grpc', 'websocket']) @pytest.mark.asyncio async def test_runtimes_headful_topology(port_generator, protocol, terminate_head): @@ -608,7 +613,8 @@ def _create_gqlgateway_runtime(graph_description, pod_addresses, port): '--protocol', 'http', ] - ), req_handler_cls=GatewayRequestHandler + ), + req_handler_cls=GatewayRequestHandler, ) as runtime: runtime.run_forever() diff --git a/tests/integration/pods/container/gateway-runtime/runtime.py b/tests/integration/pods/container/gateway-runtime/runtime.py index ae277d8a6e8e1..0f48019855ec0 100644 --- a/tests/integration/pods/container/gateway-runtime/runtime.py +++ b/tests/integration/pods/container/gateway-runtime/runtime.py @@ -10,7 +10,9 @@ def run(*args, **kwargs): runtime_args = set_gateway_parser().parse_args(args) _update_gateway_args(runtime_args) - with AsyncNewLoopRuntime(runtime_args, req_handler_cls=GatewayRequestHandler) as runtime: + with AsyncNewLoopRuntime( + runtime_args, req_handler_cls=GatewayRequestHandler + ) as runtime: runtime.run_forever() diff --git a/tests/integration/pods/container/head-runtime/runtime.py b/tests/integration/pods/container/head-runtime/runtime.py index 
166b47582bec0..354ebf13a894c 100644 --- a/tests/integration/pods/container/head-runtime/runtime.py +++ b/tests/integration/pods/container/head-runtime/runtime.py @@ -10,7 +10,9 @@ def run(*args, **kwargs): runtime_args.host = runtime_args.host[0] runtime_args.port = runtime_args.port - with AsyncNewLoopRuntime(args=runtime_args, req_handler_cls=HeaderRequestHandler) as runtime: + with AsyncNewLoopRuntime( + args=runtime_args, req_handler_cls=HeaderRequestHandler + ) as runtime: runtime.run_forever() diff --git a/tests/integration/rr_cuda/test_rr_cuda.py b/tests/integration/rr_cuda/test_rr_cuda.py index e7a1012d32824..e744b78034c21 100644 --- a/tests/integration/rr_cuda/test_rr_cuda.py +++ b/tests/integration/rr_cuda/test_rr_cuda.py @@ -30,9 +30,7 @@ def cuda_visible_devices(request): @pytest.mark.parametrize( 'cuda_total_devices, cuda_visible_devices, env', - [ - [3, 'RR', None], [3, None, {'CUDA_VISIBLE_DEVICES': 'RR'}] - ], + [[3, 'RR', None], [3, None, {'CUDA_VISIBLE_DEVICES': 'RR'}]], indirect=['cuda_total_devices', 'cuda_visible_devices'], ) def test_cuda_assignment(cuda_total_devices, cuda_visible_devices, env): @@ -43,7 +41,13 @@ def __init__(self, **kwargs): @requests def foo(self, **kwargs): - return DocumentArray([Document(tags={'cuda_visible_devices': str(self.cuda_visible_devices)})]) + return DocumentArray( + [ + Document( + tags={'cuda_visible_devices': str(self.cuda_visible_devices)} + ) + ] + ) f = Flow().add(uses=MyCUDAUserExecutor, env=env or {}, replicas=3) with f: @@ -51,6 +55,3 @@ def foo(self, **kwargs): cuda_visible_devices = set([doc.tags['cuda_visible_devices'] for doc in ret]) assert cuda_visible_devices == {'0', '1', '2'} - - - diff --git a/tests/integration/runtimes/test_gateway_dry_run.py b/tests/integration/runtimes/test_gateway_dry_run.py index b3eff89022a9d..77cf83faf65a4 100644 --- a/tests/integration/runtimes/test_gateway_dry_run.py +++ b/tests/integration/runtimes/test_gateway_dry_run.py @@ -34,7 +34,8 @@ def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='gr '--protocol', protocol, ] - ), req_handler_cls=GatewayRequestHandler + ), + req_handler_cls=GatewayRequestHandler, ) as runtime: runtime.run_forever() diff --git a/tests/integration/stateful/stateful_no_snapshot_exec/executor.py b/tests/integration/stateful/stateful_no_snapshot_exec/executor.py index 8f37acb9f4a57..6c0944efd9780 100644 --- a/tests/integration/stateful/stateful_no_snapshot_exec/executor.py +++ b/tests/integration/stateful/stateful_no_snapshot_exec/executor.py @@ -26,14 +26,18 @@ def __init__(self, *args, **kwargs): @requests(on=['/index']) @write - def index(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[TextDocWithId]: + def index( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: self.logger.debug(f'Indexing doc {doc.text} with ID {doc.id}') self._docs.append(doc) self._docs_dict[doc.id] = doc @requests(on=['/search']) - def search(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[TextDocWithId]: + def search( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: self.logger.debug(f'Searching against {len(self._docs)} documents') doc.text = self._docs_dict[doc.id].text @@ -41,8 +45,12 @@ def search(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[ doc.tags['num'] = random_num @requests(on=['/similarity']) - def search_similarity(self, docs: DocumentArray[TextDocWithId], **kwargs) -> 
DocumentArray[TextDocWithId]: + def search_similarity( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: - self.logger.debug(f'Searching similarity against {len(self._docs)} documents') + self.logger.debug( + f'Searching similarity against {len(self._docs)} documents' + ) doc.text = 'similarity' doc.l = [doc.id for doc in self._docs] diff --git a/tests/integration/stateful/stateful_snapshot_exec/executor.py b/tests/integration/stateful/stateful_snapshot_exec/executor.py index 107036569f35a..46323fa457d49 100644 --- a/tests/integration/stateful/stateful_snapshot_exec/executor.py +++ b/tests/integration/stateful/stateful_snapshot_exec/executor.py @@ -25,14 +25,18 @@ def __init__(self, *args, **kwargs): @requests(on=['/index']) @write - def index(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[TextDocWithId]: + def index( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: self.logger.debug(f'Indexing doc {doc.text} with ID {doc.id}') self._docs.append(doc) self._docs_dict[doc.id] = doc @requests(on=['/search']) - def search(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[TextDocWithId]: + def search( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: self.logger.debug(f'Searching against {len(self._docs)} documents') doc.text = self._docs_dict[doc.id].text @@ -40,9 +44,13 @@ def search(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[ doc.tags['num'] = random_num @requests(on=['/similarity']) - def search_similarity(self, docs: DocumentArray[TextDocWithId], **kwargs) -> DocumentArray[TextDocWithId]: + def search_similarity( + self, docs: DocumentArray[TextDocWithId], **kwargs + ) -> DocumentArray[TextDocWithId]: for doc in docs: - self.logger.debug(f'Searching similarity against {len(self._docs)} documents') + self.logger.debug( + f'Searching similarity against {len(self._docs)} documents' + ) doc.text = 'similarity' doc.l = [doc.id for doc in self._docs] diff --git a/tests/integration/stateful/test_stateful.py b/tests/integration/stateful/test_stateful.py index 73b575414e561..9c577eccee677 100644 --- a/tests/integration/stateful/test_stateful.py +++ b/tests/integration/stateful/test_stateful.py @@ -8,7 +8,9 @@ from jina.helper import random_port -from tests.integration.stateful.stateful_no_snapshot_exec.executor import MyStateExecutorNoSnapshot +from tests.integration.stateful.stateful_no_snapshot_exec.executor import ( + MyStateExecutorNoSnapshot, +) from tests.integration.stateful.stateful_snapshot_exec.executor import MyStateExecutor from jina._docarray import docarray_v2 @@ -28,6 +30,7 @@ class TextDocWithId(TextDoc): def kill_all_children(): yield from multiprocessing import active_children + children = active_children() for p in children: print(f' Child process {p.pid} is still active') @@ -50,7 +53,9 @@ def stateful_exec_docker_image_built(): def assert_is_indexed(client, search_da): - docs = client.search(inputs=search_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + docs = client.search( + inputs=search_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) for doc in docs: assert doc.text == f'ID {doc.id}' @@ -59,7 +64,9 @@ def assert_all_replicas_indexed(client, search_da, num_replicas=3, key='pid'): for query in search_da: pids = set() for _ in range(10): - for resp in client.search(inputs=query, request_size=1, 
return_type=DocumentArray[TextDocWithId]): + for resp in client.search( + inputs=query, request_size=1, return_type=DocumentArray[TextDocWithId] + ): pids.add(resp.tags[key]) assert resp.text == f'ID {query.id}' if len(pids) == num_replicas: @@ -71,7 +78,9 @@ def assert_all_replicas_indexed(client, search_da, num_replicas=3, key='pid'): @pytest.mark.parametrize('executor_cls', [MyStateExecutor, MyStateExecutorNoSnapshot]) @pytest.mark.parametrize('shards', [2, 1]) @pytest.mark.skipif(not docarray_v2, reason='tests support for docarray>=0.30') -def test_stateful_index_search(executor_cls, shards, tmpdir, stateful_exec_docker_image_built, kill_all_children): +def test_stateful_index_search( + executor_cls, shards, tmpdir, stateful_exec_docker_image_built, kill_all_children +): replicas = 3 if shards > 1: peer_ports = {} @@ -93,22 +102,30 @@ def test_stateful_index_search(executor_cls, shards, tmpdir, stateful_exec_docke shards=shards, volumes=[str(tmpdir) + ':' + '/workspace'], peer_ports=peer_ports, - polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'} + polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'}, ) with dep: index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)] ) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(1)]) - dep.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(1)] + ) + dep.index( + inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) # allowing some time for the state to be replicated time.sleep(20) # checking against the main read replica assert_is_indexed(dep, search_da) assert_all_replicas_indexed(dep, search_da) - docs = dep.post(on='/similarity', inputs=search_da, request_size=1, - return_type=DocumentArray[TextDocWithId]) + docs = dep.post( + on='/similarity', + inputs=search_da, + request_size=1, + return_type=DocumentArray[TextDocWithId], + ) for doc in docs: assert doc.text == 'similarity' assert len(doc.l) == len(index_da) # good merging of results @@ -118,9 +135,13 @@ def test_stateful_index_search(executor_cls, shards, tmpdir, stateful_exec_docke @pytest.mark.timeout(240) @pytest.mark.parametrize('executor_cls', [MyStateExecutor, MyStateExecutorNoSnapshot]) @pytest.mark.parametrize('shards', [2, 1]) -@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ or not docarray_v2, reason='tests support for docarray>=0.30 and not working on GITHUB since issue with restarting server in grpc') -def test_stateful_index_search_restore(executor_cls, shards, tmpdir, stateful_exec_docker_image_built, - kill_all_children): +@pytest.mark.skipif( + 'GITHUB_WORKFLOW' in os.environ or not docarray_v2, + reason='tests support for docarray>=0.30 and not working on GITHUB since issue with restarting server in grpc', +) +def test_stateful_index_search_restore( + executor_cls, shards, tmpdir, stateful_exec_docker_image_built, kill_all_children +): replicas = 3 peer_ports = {} for shard in range(shards): @@ -139,14 +160,18 @@ def test_stateful_index_search_restore(executor_cls, shards, tmpdir, stateful_ex shards=shards, volumes=[str(tmpdir) + ':' + '/workspace'], peer_ports=peer_ports, - polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'} + polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'}, ) with dep: index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)] 
) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(1)]) - dep.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(1)] + ) + dep.index( + inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) # allowing some time for the state to be replicated time.sleep(20) @@ -170,22 +195,28 @@ def test_stateful_index_search_restore(executor_cls, shards, tmpdir, stateful_ex shards=shards, volumes=[str(tmpdir) + ':' + '/workspace'], peer_ports=peer_ports, - polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'} + polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'}, ) with dep_restore: index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)] ) - dep_restore.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + dep_restore.index( + inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) time.sleep(20) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(200)]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(200)] + ) assert_all_replicas_indexed(dep_restore, search_da) time.sleep(10) @pytest.mark.skipif(not docarray_v2, reason='tests support for docarray>=0.30') @pytest.mark.parametrize('shards', [1, 2]) -def test_stateful_index_search_container(shards, tmpdir, stateful_exec_docker_image_built): +def test_stateful_index_search_container( + shards, tmpdir, stateful_exec_docker_image_built +): replicas = 3 peer_ports = {} for shard in range(shards): @@ -205,14 +236,18 @@ def test_stateful_index_search_container(shards, tmpdir, stateful_exec_docker_im workspace='/workspace/tmp', volumes=[str(tmpdir) + ':' + '/workspace/tmp'], peer_ports=peer_ports, - polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'} + polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'}, ) with dep: index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)] ) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(100)]) - dep.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(100)] + ) + dep.index( + inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) # allowing some time for the state to be replicated time.sleep(20) @@ -235,16 +270,20 @@ def test_stateful_index_search_container(shards, tmpdir, stateful_exec_docker_im workspace='/workspace/tmp', volumes=[str(tmpdir) + ':' + '/workspace/tmp'], peer_ports=peer_ports, - polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'} + polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'}, ) # test restoring with dep_restore: index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)] ) - dep_restore.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + dep_restore.index( + inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId] + ) time.sleep(20) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(200)]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(200)] + ) assert_all_replicas_indexed(dep_restore, search_da, 
key='num') time.sleep(10) @@ -254,6 +293,7 @@ def test_stateful_index_search_container(shards, tmpdir, stateful_exec_docker_im def test_add_new_replica(executor_cls, tmpdir): from jina.parsers import set_pod_parser from jina.orchestrate.pods.factory import PodFactory + gateway_port = random_port() replicas = 3 peer_ports = {} @@ -303,14 +343,19 @@ def test_add_new_replica(executor_cls, tmpdir): time.sleep(10) - index_da = DocumentArray[TextDocWithId]( [TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)] ) - ctx_mngr.index(inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]) + ctx_mngr.index( + inputs=index_da, + request_size=1, + return_type=DocumentArray[TextDocWithId], + ) time.sleep(20) - search_da = DocumentArray[TextDocWithId]([TextDocWithId(id=f'{i}') for i in range(200)]) + search_da = DocumentArray[TextDocWithId]( + [TextDocWithId(id=f'{i}') for i in range(200)] + ) client = Client(port=new_replica_port) assert_is_indexed(client, search_da=search_da) time.sleep(10) diff --git a/tests/integration/streaming/test_clients_streaming.py b/tests/integration/streaming/test_clients_streaming.py index d85f19ca9058e..de5d0fa463933 100644 --- a/tests/integration/streaming/test_clients_streaming.py +++ b/tests/integration/streaming/test_clients_streaming.py @@ -124,7 +124,7 @@ def test_disable_prefetch_slow_client_fast_executor(protocol, inputs, use_stream inputs=inputs, request_size=1, on_done=lambda response: on_done(response, final_da), - stream=use_stream + stream=use_stream, ) assert len(final_da) == INPUT_LEN @@ -168,7 +168,7 @@ def test_disable_prefetch_fast_client_slow_executor(protocol, inputs, use_stream inputs=inputs, request_size=1, on_done=lambda response: on_done(response, final_da), - stream=use_stream + stream=use_stream, ) assert len(final_da) == INPUT_LEN @@ -242,7 +242,11 @@ async def malicious_client_gen(): def client(gen, port): Client(protocol=protocol, port=port, prefetch=prefetch).post( - on='/index', inputs=gen, request_size=1, return_responses=True, stream=use_stream + on='/index', + inputs=gen, + request_size=1, + return_responses=True, + stream=use_stream, ) pool: List[Process] = [] @@ -271,7 +275,12 @@ def client(gen, port): order_of_ids = list( Client(protocol=protocol, port=f.port, prefetch=prefetch) - .post(on='/status', inputs=[Document()], return_responses=True, stream=use_stream)[0] + .post( + on='/status', + inputs=[Document()], + return_responses=True, + stream=use_stream, + )[0] .docs[0] .tags['ids'] ) @@ -291,7 +300,9 @@ def client(gen, port): When there are no rules, badguy wins! With rule, you find balance in the world. 
""" - if prefetch == 5 and use_stream: # if stream is False the prefetch is controleed by each client and then it applies per client + if ( + prefetch == 5 and use_stream + ): # if stream is False the prefetch is controleed by each client and then it applies per client assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'badguy'} elif prefetch == 0: assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'goodguy'} diff --git a/tests/integration/test_return_order/test_return_order.py b/tests/integration/test_return_order/test_return_order.py index f8ded2b709555..3a94406fc7b51 100644 --- a/tests/integration/test_return_order/test_return_order.py +++ b/tests/integration/test_return_order/test_return_order.py @@ -21,14 +21,22 @@ def foo(self, *args, **kwargs): with f: for _ in range(5): result_flow = f.post( - '/', inputs=input_da, request_size=10, results_in_order=True, stream=stream + '/', + inputs=input_da, + request_size=10, + results_in_order=True, + stream=stream, ) for input, output in zip(input_da, result_flow): assert input.text == output.text c = Client(port=f.port, protocol=str(f.protocol)) for _ in range(5): result_client = c.post( - '/', inputs=input_da, request_size=10, results_in_order=True, stream=stream + '/', + inputs=input_da, + request_size=10, + results_in_order=True, + stream=stream, ) for input, output in zip(input_da, result_client): assert input.text == output.text diff --git a/tests/jinahub/app.py b/tests/jinahub/app.py index e215310e829b7..0372264939949 100644 --- a/tests/jinahub/app.py +++ b/tests/jinahub/app.py @@ -1,5 +1,6 @@ from jina import Flow import os + os.environ['JINA_LOG_LEVEL'] = 'DEBUG' if __name__ == '__main__': diff --git a/tests/k8s/conftest.py b/tests/k8s/conftest.py index 419431d990424..886cd7e4de473 100644 --- a/tests/k8s/conftest.py +++ b/tests/k8s/conftest.py @@ -195,7 +195,7 @@ def build_docker_image(image_name: str, image_name_tag_map: Dict[str, str]) -> s def set_test_pip_version() -> None: os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip' yield - if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed + if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed del os.environ['JINA_GATEWAY_IMAGE'] diff --git a/tests/k8s/test_k8s_deployment.py b/tests/k8s/test_k8s_deployment.py index 2a82c5da23d2b..2f1fd9691fc94 100644 --- a/tests/k8s/test_k8s_deployment.py +++ b/tests/k8s/test_k8s_deployment.py @@ -207,7 +207,7 @@ async def test_deployment_serve_k8s( indirect=True, ) async def test_deployment_with_multiple_protocols( - logger, docker_images, tmpdir, k8s_cluster + logger, docker_images, tmpdir, k8s_cluster ): from kubernetes import client @@ -241,13 +241,25 @@ async def test_deployment_with_multiple_protocols( grpc_port = GrpcConnectionPool.K8S_PORT http_port = GrpcConnectionPool.K8S_PORT + 1 - with shell_portforward(k8s_cluster._cluster.kubectl_path, pod_or_service='service/test-executor-1-http', port1=http_port, port2=http_port, namespace=namespace): + with shell_portforward( + k8s_cluster._cluster.kubectl_path, + pod_or_service='service/test-executor-1-http', + port1=http_port, + port2=http_port, + namespace=namespace, + ): import requests resp = requests.get(f'http://localhost:{http_port}').json() assert resp == {} - with shell_portforward(k8s_cluster._cluster.kubectl_path, pod_or_service='service/test-executor', port1=grpc_port, port2=grpc_port, namespace=namespace): + with shell_portforward( + k8s_cluster._cluster.kubectl_path, + 
pod_or_service='service/test-executor', + port1=grpc_port, + port2=grpc_port, + namespace=namespace, + ): grpc_client = Client(protocol='grpc', port=grpc_port, asyncio=True) async for _ in grpc_client.post('/', inputs=DocumentArray.empty(5)): pass diff --git a/tests/k8s/test_k8s_flow.py b/tests/k8s/test_k8s_flow.py index 81ca6b81c8992..278b7fde4d316 100644 --- a/tests/k8s/test_k8s_flow.py +++ b/tests/k8s/test_k8s_flow.py @@ -124,6 +124,7 @@ async def create_all_flow_deployments_and_wait_ready( async def run_test(flow, core_client, namespace, endpoint, n_docs=10, request_size=100): # start port forwarding from jina.clients import Client + port = GrpcConnectionPool.K8S_PORT gateway_pod_name = ( @@ -136,9 +137,7 @@ async def run_test(flow, core_client, namespace, endpoint, n_docs=10, request_si config_path = os.environ['KUBECONFIG'] import portforward - with portforward.forward( - namespace, gateway_pod_name, port, port, config_path - ): + with portforward.forward(namespace, gateway_pod_name, port, port, config_path): client_kwargs = dict( host='localhost', port=port, @@ -965,7 +964,9 @@ async def test_flow_with_external_k8s_deployment(logger, docker_images, tmpdir): [['jinaai/jina']], indirect=True, ) -async def test_flow_with_metadata_k8s_deployment(logger, grpc_metadata, docker_images, tmpdir): +async def test_flow_with_metadata_k8s_deployment( + logger, grpc_metadata, docker_images, tmpdir +): from kubernetes import client namespace = 'test-flow-with-metadata-k8s-deployment'.lower() @@ -1448,7 +1449,10 @@ async def test_flow_with_stateful_executor( app_client = client.AppsV1Api(api_client=api_client) try: dump_path = os.path.join(str(tmpdir), 'test-flow-with-volumes') - flow = Flow(name='test-flow-with-volumes', protocol='http',).add( + flow = Flow( + name='test-flow-with-volumes', + protocol='http', + ).add( name='statefulexecutor', uses=f'docker://{docker_images[0]}', workspace=f'{str(tmpdir)}/workspace_path', @@ -1551,7 +1555,9 @@ async def test_really_slow_executor_liveness_probe_works(docker_images, tmpdir, try: dump_path = os.path.join(str(tmpdir), 'test-flow-slow-process-executor') - flow = Flow(name='test-flow-slow-process-executor',).add( + flow = Flow( + name='test-flow-slow-process-executor', + ).add( name='slow_process_executor', uses=f'docker://{docker_images[0]}', uses_with={'time_sleep': 20}, diff --git a/tests/k8s/test_k8s_graceful_request_handling.py b/tests/k8s/test_k8s_graceful_request_handling.py index 7dfbf3e96cadb..efee831f565e1 100644 --- a/tests/k8s/test_k8s_graceful_request_handling.py +++ b/tests/k8s/test_k8s_graceful_request_handling.py @@ -138,7 +138,9 @@ async def async_inputs(): 'docker_images', [['slow-process-executor', 'jinaai/jina']], indirect=True ) async def test_no_message_lost_during_scaling(logger, docker_images, tmpdir): - flow = Flow(name='test-flow-slow-process-executor',).add( + flow = Flow( + name='test-flow-slow-process-executor', + ).add( name='slow_process_executor', uses=f'docker://{docker_images[0]}', replicas=3, @@ -241,7 +243,9 @@ async def test_no_message_lost_during_scaling(logger, docker_images, tmpdir): 'docker_images', [['slow-process-executor', 'jinaai/jina']], indirect=True ) async def test_no_message_lost_during_kill(logger, docker_images, tmpdir): - flow = Flow(name='test-flow-slow-process-executor',).add( + flow = Flow( + name='test-flow-slow-process-executor', + ).add( name='slow_process_executor', uses=f'docker://{docker_images[0]}', replicas=3, @@ -350,7 +354,9 @@ async def test_no_message_lost_during_kill(logger, 
docker_images, tmpdir): 'docker_images', [['slow-process-executor', 'jinaai/jina']], indirect=True ) async def test_linear_processing_time_scaling(docker_images, logger, tmpdir): - flow = Flow(name='test-flow-slow-process-executor',).add( + flow = Flow( + name='test-flow-slow-process-executor', + ).add( name='slow_process_executor', uses=f'docker://{docker_images[0]}', replicas=3, diff --git a/tests/k8s_otel/conftest.py b/tests/k8s_otel/conftest.py index 1e70bfa991c66..333d43c4daddb 100644 --- a/tests/k8s_otel/conftest.py +++ b/tests/k8s_otel/conftest.py @@ -11,6 +11,7 @@ # This can and probably should be put in env variable actually. cluster.KIND_VERSION = 'v0.11.1' + # TODO: Can we get jina image to build here as well? @pytest.fixture(scope='session', autouse=True) def build_and_load_images(k8s_cluster_v2: KindClusterWrapperV2) -> None: @@ -21,7 +22,7 @@ def build_and_load_images(k8s_cluster_v2: KindClusterWrapperV2) -> None: k8s_cluster_v2.load_docker_image(image_name='jinaai/jina', tag='test-pip') os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip' yield - if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed + if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed it del os.environ['JINA_GATEWAY_IMAGE'] k8s_cluster_v2.remove_docker_image('test-instrumentation', 'test-pip') diff --git a/tests/unit/clients/python/test_client_errors.py b/tests/unit/clients/python/test_client_errors.py index ef94d8d5256a4..59a70888bf3fd 100644 --- a/tests/unit/clients/python/test_client_errors.py +++ b/tests/unit/clients/python/test_client_errors.py @@ -41,7 +41,10 @@ def test_grpc_stream_transient_error_iterable_input(port_generator, mocker): random_port = port_generator() stop_event = multiprocessing.Event() start_event = multiprocessing.Event() - t = multiprocessing.Process(target=_start_runtime, args=('grpc', random_port, 'flow', stop_event, start_event)) + t = multiprocessing.Process( + target=_start_runtime, + args=('grpc', random_port, 'flow', stop_event, start_event), + ) t.start() start_event.wait(5) max_attempts = 5 @@ -66,12 +69,14 @@ def test_grpc_stream_transient_error_iterable_input(port_generator, mocker): on_error_mock.assert_not_called() except ConnectionError as err: - sync_wait_or_raise_err(attempt=attempt, - err=err, - max_attempts=max_attempts, - backoff_multiplier=backoff_multiplier, - initial_backoff=initial_backoff, - max_backoff=max_backoff) + sync_wait_or_raise_err( + attempt=attempt, + err=err, + max_attempts=max_attempts, + backoff_multiplier=backoff_multiplier, + initial_backoff=initial_backoff, + max_backoff=max_backoff, + ) finally: stop_event.set() t.join(5) @@ -81,13 +86,15 @@ def test_grpc_stream_transient_error_iterable_input(port_generator, mocker): @pytest.mark.timeout(90) @pytest.mark.parametrize('flow_or_deployment', ['deployment', 'flow']) def test_grpc_stream_transient_error_docarray_input( - flow_or_deployment, port_generator, mocker + flow_or_deployment, port_generator, mocker ): random_port = port_generator() stop_event = multiprocessing.Event() start_event = multiprocessing.Event() - t = multiprocessing.Process(target=_start_runtime, - args=('grpc', random_port, flow_or_deployment, stop_event, start_event)) + t = multiprocessing.Process( + target=_start_runtime, + args=('grpc', random_port, flow_or_deployment, stop_event, start_event), + ) t.start() start_event.wait(5) num_docs = 10 @@ -121,13 +128,15 @@ def test_grpc_stream_transient_error_docarray_input( @pytest.mark.parametrize('flow_or_deployment',
['deployment', 'flow']) @pytest.mark.ignore async def test_async_grpc_stream_transient_error( - flow_or_deployment, port_generator, mocker + flow_or_deployment, port_generator, mocker ): random_port = port_generator() stop_event = multiprocessing.Event() start_event = multiprocessing.Event() - t = multiprocessing.Process(target=_start_runtime, - args=('grpc', random_port, flow_or_deployment, stop_event, start_event)) + t = multiprocessing.Process( + target=_start_runtime, + args=('grpc', random_port, flow_or_deployment, stop_event, start_event), + ) t.start() start_event.wait(5) max_attempts = 5 @@ -173,7 +182,7 @@ async def test_async_grpc_stream_transient_error( @pytest.mark.parametrize('flow_or_deployment', ['flow', 'deployment']) @pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket']) def test_sync_clients_max_attempts_transient_error( - mocker, flow_or_deployment, protocol, port_generator + mocker, flow_or_deployment, protocol, port_generator ): if flow_or_deployment == 'deployment' and protocol in ['websocket', 'http']: return @@ -181,8 +190,10 @@ def test_sync_clients_max_attempts_transient_error( client = Client(host=f'{protocol}://localhost:{random_port}') stop_event = multiprocessing.Event() start_event = multiprocessing.Event() - t = multiprocessing.Process(target=_start_runtime, - args=(protocol, random_port, flow_or_deployment, stop_event, start_event)) + t = multiprocessing.Process( + target=_start_runtime, + args=(protocol, random_port, flow_or_deployment, stop_event, start_event), + ) t.start() start_event.wait(5) try: diff --git a/tests/unit/clients/test_asyncio.py b/tests/unit/clients/test_asyncio.py index 48d7128cfd9a5..2e044889b5154 100644 --- a/tests/unit/clients/test_asyncio.py +++ b/tests/unit/clients/test_asyncio.py @@ -50,7 +50,9 @@ async def input_function(): yield 42 with pytest.raises(TypeError): - async for req in request_generator(exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE): + async for req in request_generator( + exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE + ): print(req.docs.summary()) async def input_function(): @@ -58,5 +60,7 @@ async def input_function(): yield 42 with pytest.raises(ValueError): - async for req in request_generator(exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE): + async for req in request_generator( + exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE + ): print(req.docs.summary()) diff --git a/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py b/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py index 93061cdf2e689..96e2451e0d1f4 100644 --- a/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py +++ b/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py @@ -376,14 +376,18 @@ def test_worker_services(name: str, shards: str): (['12345', '12344', '12343'], ['grpc', 'http', 'websocket']), ], ) -@pytest.mark.parametrize('custom_gateway', ['jinaai+docker://jina/custom-gateway', None]) -def test_docker_compose_gateway(monkeypatch, deployments_addresses, custom_gateway, port, protocol): +@pytest.mark.parametrize( + 'custom_gateway', ['jinaai+docker://jina/custom-gateway', None] +) +def test_docker_compose_gateway( + monkeypatch, deployments_addresses, custom_gateway, port, protocol +): from hubble.executor.hubio import HubExecutor, HubIO def _mock_fetch( - name, - *args, - **kwargs, + name, + *args, + **kwargs, ): return ( HubExecutor( @@ -527,10 +531,7 
diff --git a/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py b/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py
index 93061cdf2e689..96e2451e0d1f4 100644
--- a/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py
+++ b/tests/unit/orchestrate/deployments/config/test_docker_compose_pod_config.py
@@ -376,14 +376,18 @@ def test_worker_services(name: str, shards: str):
         (['12345', '12344', '12343'], ['grpc', 'http', 'websocket']),
     ],
 )
-@pytest.mark.parametrize('custom_gateway', ['jinaai+docker://jina/custom-gateway', None])
-def test_docker_compose_gateway(monkeypatch, deployments_addresses, custom_gateway, port, protocol):
+@pytest.mark.parametrize(
+    'custom_gateway', ['jinaai+docker://jina/custom-gateway', None]
+)
+def test_docker_compose_gateway(
+    monkeypatch, deployments_addresses, custom_gateway, port, protocol
+):
     from hubble.executor.hubio import HubExecutor, HubIO
 
     def _mock_fetch(
-            name,
-            *args,
-            **kwargs,
+        name,
+        *args,
+        **kwargs,
     ):
         return (
             HubExecutor(
@@ -527,10 +531,7 @@ def _mock_fetch(
     if shards > 1:
         head_name, head_config = yaml_configs[0]
         assert head_name == 'executor-head'
-        assert (
-            head_config['image']
-            == f'jinaai/jina:test-pip'
-        )
+        assert head_config['image'] == f'jinaai/jina:test-pip'
         assert head_config['entrypoint'] == ['jina']
         head_args = head_config['command']
         assert head_args[0] == 'executor'
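The rule also runs in reverse: in the hunk above, a hand-wrapped assert is collapsed back onto a single line because the whole expression fits within the limit. Roughly (and note that Black never edits string contents, so the pointless f-prefix on a placeholder-free literal survives):

    # Before: wrapped although the expression is well under 88 columns
    assert (
        head_config['image']
        == f'jinaai/jina:test-pip'
    )

    # After: joined onto one line; the no-op f-string is left untouched
    assert head_config['image'] == f'jinaai/jina:test-pip'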
diff --git a/tests/unit/orchestrate/deployments/config/test_k8s_deployment_config.py b/tests/unit/orchestrate/deployments/config/test_k8s_deployment_config.py
index 2da5fb318ee0a..9c2e0b7934052 100644
--- a/tests/unit/orchestrate/deployments/config/test_k8s_deployment_config.py
+++ b/tests/unit/orchestrate/deployments/config/test_k8s_deployment_config.py
@@ -14,7 +14,7 @@
 
 
 def namespace_equal(
-        n1: Union[Namespace, Dict], n2: Union[Namespace, Dict], skip_attr: Tuple = ()
+    n1: Union[Namespace, Dict], n2: Union[Namespace, Dict], skip_attr: Tuple = ()
 ) -> bool:
     """
     Checks that two `Namespace` object have equal public attributes.
@@ -40,11 +40,11 @@ def namespace_equal(
 @pytest.mark.parametrize('uses_with', ['{"paramkey": "paramvalue"}', None])
 @pytest.mark.parametrize('uses_metas', ['{"workspace": "workspacevalue"}', None])
 def test_parse_args(
-        shards: int,
-        uses_with,
-        uses_metas,
-        uses_before,
-        uses_after,
+    shards: int,
+    uses_with,
+    uses_metas,
+    uses_before,
+    uses_after,
 ):
     args_list = ['--shards', str(shards), '--name', 'executor']
     if uses_before is not None:
@@ -82,53 +82,53 @@ def test_parse_args(
         ),
     )
     assert (
-            deployment_config.deployment_args['head_deployment'].k8s_namespace
-            == 'default-namespace'
+        deployment_config.deployment_args['head_deployment'].k8s_namespace
+        == 'default-namespace'
     )
     assert (
-            deployment_config.deployment_args['head_deployment'].name == 'executor/head'
+        deployment_config.deployment_args['head_deployment'].name == 'executor/head'
     )
     assert (
-            deployment_config.deployment_args['head_deployment'].runtime_cls
-            == 'HeadRuntime'
+        deployment_config.deployment_args['head_deployment'].runtime_cls
+        == 'HeadRuntime'
     )
     assert deployment_config.deployment_args['head_deployment'].uses is None
     assert (
-            deployment_config.deployment_args['head_deployment'].uses_before
-            == uses_before
+        deployment_config.deployment_args['head_deployment'].uses_before
+        == uses_before
     )
     assert (
-            deployment_config.deployment_args['head_deployment'].uses_after
-            == uses_after
+        deployment_config.deployment_args['head_deployment'].uses_after
+        == uses_after
     )
     assert deployment_config.deployment_args['head_deployment'].uses_metas is None
     assert deployment_config.deployment_args['head_deployment'].uses_with is None
     if uses_before is None:
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_before_address
-                is None
+            deployment_config.deployment_args['head_deployment'].uses_before_address
+            is None
         )
     else:
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_before_address
-                == '127.0.0.1:8078'
+            deployment_config.deployment_args['head_deployment'].uses_before_address
+            == '127.0.0.1:8078'
         )
     if uses_after is None:
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_after_address
-                is None
+            deployment_config.deployment_args['head_deployment'].uses_after_address
+            is None
        )
     else:
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_after_address
-                == '127.0.0.1:8079'
+            deployment_config.deployment_args['head_deployment'].uses_after_address
+            == '127.0.0.1:8079'
         )
     candidate_connection_list = {
         str(i): f'executor-{i}.default-namespace.svc:8080' for i in range(shards)
     }
     assert deployment_config.deployment_args[
-        'head_deployment'
-    ].connection_list == json.dumps(candidate_connection_list)
+        'head_deployment'
+    ].connection_list == json.dumps(candidate_connection_list)
 
     for i, depl_arg in enumerate(deployment_config.deployment_args['deployments']):
         import copy
@@ -178,25 +178,25 @@ def test_parse_args_custom_executor(shards: int):
 
     if shards > 1:
         assert (
-                deployment_config.deployment_args['head_deployment'].runtime_cls
-                == 'HeadRuntime'
+            deployment_config.deployment_args['head_deployment'].runtime_cls
+            == 'HeadRuntime'
         )
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_before
-                == uses_before
+            deployment_config.deployment_args['head_deployment'].uses_before
+            == uses_before
         )
         assert deployment_config.deployment_args['head_deployment'].uses is None
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_after
-                == uses_after
+            deployment_config.deployment_args['head_deployment'].uses_after
+            == uses_after
         )
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_before_address
-                == f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_BEFORE}'
+            deployment_config.deployment_args['head_deployment'].uses_before_address
+            == f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_BEFORE}'
         )
         assert (
-                deployment_config.deployment_args['head_deployment'].uses_after_address
-                == f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_AFTER}'
+            deployment_config.deployment_args['head_deployment'].uses_after_address
+            == f'127.0.0.1:{GrpcConnectionPool.K8S_PORT_USES_AFTER}'
         )
 
     for i, depl_arg in enumerate(deployment_config.deployment_args['deployments']):
@@ -227,16 +227,16 @@ def test_parse_args_custom_executor(shards: int):
     ['name', 'shards'],
     [
         (
-                'gateway',
-                '1',
+            'gateway',
+            '1',
         ),
         (
-                'test-deployment',
-                '1',
+            'test-deployment',
+            '1',
         ),
         (
-                'test-deployment',
-                '2',
+            'test-deployment',
+            '2',
         ),
     ],
 )
@@ -266,7 +266,7 @@ def test_deployments(name: str, shards: str, gpus):
 
 
 def assert_config_map_config(
-        config_map: Dict, base_name: str, expected_config_map_data: Dict
+    config_map: Dict, base_name: str, expected_config_map_data: Dict
 ):
     assert config_map['kind'] == 'ConfigMap'
     assert config_map['metadata'] == {
@@ -283,27 +283,31 @@ def _custom_patched_resolve_image_name(uses: str):
     :return: image name equivalent
     """
-    from jina.constants import __default_executor__, __default_http_gateway__, __default_composite_gateway__, \
-        __default_grpc_gateway__, __default_websocket_gateway__
+    from jina.constants import (
+        __default_executor__,
+        __default_http_gateway__,
+        __default_composite_gateway__,
+        __default_grpc_gateway__,
+        __default_websocket_gateway__,
+    )
     from jina.orchestrate.deployments.config.helper import get_image_name
     import os
+
     if uses == 'jinaai+docker://jina/custom-gateway':
         return 'jinaai+docker://jina/custom-gateway'
-    if uses in [__default_http_gateway__,
-                __default_websocket_gateway__,
-                __default_grpc_gateway__,
-                __default_composite_gateway__]:
-        image_name = os.getenv(
-            'JINA_GATEWAY_IMAGE', None
-        )
+    if uses in [
+        __default_http_gateway__,
+        __default_websocket_gateway__,
+        __default_grpc_gateway__,
+        __default_composite_gateway__,
+    ]:
+        image_name = os.getenv('JINA_GATEWAY_IMAGE', None)
         if image_name is None:
             image_name = get_image_name('jinaai+docker://JinaGateway:latest')
     elif uses is not None and uses != __default_executor__:
         image_name = get_image_name(uses)
     else:
-        image_name = os.getenv(
-            'JINA_GATEWAY_IMAGE', None
-        )
+        image_name = os.getenv('JINA_GATEWAY_IMAGE', None)
         if image_name is None:
             image_name = get_image_name('jinaai+docker://JinaGateway:latest')
@@ -322,13 +326,15 @@ def _custom_patched_resolve_image_name(uses: str):
 )
 @pytest.mark.parametrize('custom_gateway', ['jinaai+docker://jina/custom-gateway'])
 @pytest.mark.parametrize('replicas', [1, 2])
-def test_k8s_yaml_gateway(monkeypatch, deployments_addresses, custom_gateway, port, protocol, replicas):
+def test_k8s_yaml_gateway(
+    monkeypatch, deployments_addresses, custom_gateway, port, protocol, replicas
+):
     from hubble.executor.hubio import HubExecutor, HubIO
 
     def _mock_fetch(
-            name,
-            *args,
-            **kwargs,
+        name,
+        *args,
+        **kwargs,
     ):
         return (
             HubExecutor(
@@ -351,7 +357,7 @@ def _mock_fetch(
         '--port',
         *port,
         '--replicas',
-        str(replicas)
+        str(replicas),
     ]
     if deployments_addresses:
         args_list.extend(['--deployments-addresses', json.dumps(deployments_addresses)])
@@ -379,7 +385,7 @@ def _mock_fetch(
         },
     )
 
-    for i, (expected_port, service) in enumerate(zip(port, configs[1: 1 + len(port)])):
+    for i, (expected_port, service) in enumerate(zip(port, configs[1 : 1 + len(port)])):
         assert service['kind'] == 'Service'
         service_gateway_name = (
             'gateway'
@@ -410,7 +416,9 @@ def _mock_fetch(
         'namespace': 'default-namespace',
     }
     spec_deployment = deployment['spec']
-    assert spec_deployment['replicas'] == replicas # gateway replication is only enabled for k8s
+    assert (
+        spec_deployment['replicas'] == replicas
+    )  # gateway replication is only enabled for k8s
     assert spec_deployment['strategy'] == {
         'type': 'RollingUpdate',
         'rollingUpdate': {'maxSurge': 1, 'maxUnavailable': 0},
@@ -445,7 +453,9 @@ def _mock_fetch(
     assert args[args.index('--k8s-namespace') + 1] == 'default-namespace'
     assert '--port' in args
     for i, _port in enumerate(port):
-        assert args[args.index('--port') + i + 1] == str(GrpcConnectionPool.K8S_PORT + i)
+        assert args[args.index('--port') + i + 1] == str(
+            GrpcConnectionPool.K8S_PORT + i
+        )
     assert '--env' not in args
     if deployments_addresses is not None:
         assert '--deployments-addresses' in args
@@ -487,19 +497,19 @@ def assert_port_config(port_dict: Dict, name: str, port: int):
 @pytest.mark.parametrize('uses_metas', ['{"workspace": "workspacevalue"}', None])
 @pytest.mark.parametrize('polling', ['ANY', 'ALL'])
 def test_k8s_yaml_regular_deployment(
-        uses_before,
-        uses_after,
-        uses,
-        shards,
-        uses_with,
-        uses_metas,
-        polling,
-        monkeypatch,
+    uses_before,
+    uses_after,
+    uses,
+    shards,
+    uses_with,
+    uses_metas,
+    polling,
+    monkeypatch,
 ):
     def _mock_fetch(
-            name,
-            *args,
-            **kwargs,
+        name,
+        *args,
+        **kwargs,
     ):
         return (
             HubExecutor(
@@ -551,7 +561,7 @@ def _mock_fetch(
     head_name, head_configs = yaml_configs[0]
     assert head_name == 'executor-head'
     assert (
-            len(head_configs) == 3
+        len(head_configs) == 3
     )  # 3 configs per yaml (configmap, service and deployment)
     config_map = head_configs[0]
     assert_config_map_config(
@@ -612,10 +622,7 @@ def _mock_fetch(
     )
     head_runtime_container = head_containers[0]
     assert head_runtime_container['name'] == 'executor'
-    assert (
-        head_runtime_container['image']
-        == f'jinaai/jina:test-pip'
-    )
+    assert head_runtime_container['image'] == f'jinaai/jina:test-pip'
     assert head_runtime_container['imagePullPolicy'] == 'IfNotPresent'
     assert head_runtime_container['command'] == ['jina']
     head_runtime_container_args = head_runtime_container['args']
@@ -624,40 +631,40 @@ def _mock_fetch(
     assert '--native' in head_runtime_container_args
     assert '--runtime-cls' in head_runtime_container_args
     assert (
-            head_runtime_container_args[
-                head_runtime_container_args.index('--runtime-cls') + 1
-                ]
-            == 'HeadRuntime'
+        head_runtime_container_args[
+            head_runtime_container_args.index('--runtime-cls') + 1
+        ]
+        == 'HeadRuntime'
     )
     assert '--name' in head_runtime_container_args
     assert (
-            head_runtime_container_args[head_runtime_container_args.index('--name') + 1]
-            == 'executor/head'
+        head_runtime_container_args[head_runtime_container_args.index('--name') + 1]
+        == 'executor/head'
     )
     assert '--k8s-namespace' in head_runtime_container_args
     assert (
-            head_runtime_container_args[
-                head_runtime_container_args.index('--k8s-namespace') + 1
-                ]
-            == 'default-namespace'
+        head_runtime_container_args[
+            head_runtime_container_args.index('--k8s-namespace') + 1
+        ]
+        == 'default-namespace'
     )
     assert '--port' in head_runtime_container_args
     assert (
-            head_runtime_container_args[head_runtime_container_args.index('--port') + 1]
-            == '8080'
+        head_runtime_container_args[head_runtime_container_args.index('--port') + 1]
+        == '8080'
     )
     assert '--env' not in head_runtime_container_args
     assert '--pod-role' in head_runtime_container_args
     assert (
-            head_runtime_container_args[
-                head_runtime_container_args.index('--pod-role') + 1
-                ]
-            == 'HEAD'
+        head_runtime_container_args[
+            head_runtime_container_args.index('--pod-role') + 1
+        ]
+        == 'HEAD'
     )
     assert '--connection-list' in head_runtime_container_args
     connection_list_string = head_runtime_container_args[
         head_runtime_container_args.index('--connection-list') + 1
-        ]
+    ]
     assert connection_list_string == json.dumps(
         {
             str(shard_id): f'executor-{shard_id}.default-namespace.svc:8080'
@@ -670,10 +677,10 @@ def _mock_fetch(
     else:
         assert '--polling' in head_runtime_container_args
         assert (
-                head_runtime_container_args[
-                    head_runtime_container_args.index('--polling') + 1
-                    ]
-                == 'ALL'
+            head_runtime_container_args[
+                head_runtime_container_args.index('--polling') + 1
+            ]
+            == 'ALL'
         )
 
     if uses_before is not None:
@@ -691,24 +698,24 @@ def _mock_fetch(
         assert '--native' in uses_before_runtime_container_args
         assert '--name' in uses_before_runtime_container_args
         assert (
-                uses_before_runtime_container_args[
-                    uses_before_runtime_container_args.index('--name') + 1
-                    ]
-                == 'executor/uses-before'
+            uses_before_runtime_container_args[
+                uses_before_runtime_container_args.index('--name') + 1
+            ]
+            == 'executor/uses-before'
         )
         assert '--k8s-namespace' in uses_before_runtime_container_args
         assert (
-                uses_before_runtime_container_args[
-                    uses_before_runtime_container_args.index('--k8s-namespace') + 1
-                    ]
-                == 'default-namespace'
+            uses_before_runtime_container_args[
+                uses_before_runtime_container_args.index('--k8s-namespace') + 1
+            ]
+            == 'default-namespace'
         )
         assert '--port' in uses_before_runtime_container_args
         assert (
-                uses_before_runtime_container_args[
-                    uses_before_runtime_container_args.index('--port') + 1
-                    ]
-                == '8078'
+            uses_before_runtime_container_args[
+                uses_before_runtime_container_args.index('--port') + 1
+            ]
+            == '8078'
         )
         assert '--env' not in uses_before_runtime_container_args
         assert '--connection-list' not in uses_before_runtime_container_args
@@ -728,24 +735,24 @@ def _mock_fetch(
         assert '--native' in uses_after_runtime_container_args
         assert '--name' in uses_after_runtime_container_args
         assert (
-                uses_after_runtime_container_args[
-                    uses_after_runtime_container_args.index('--name') + 1
-                    ]
-                == 'executor/uses-after'
+            uses_after_runtime_container_args[
+                uses_after_runtime_container_args.index('--name') + 1
+            ]
+            == 'executor/uses-after'
         )
         assert '--k8s-namespace' in uses_after_runtime_container_args
         assert (
-                uses_after_runtime_container_args[
-                    uses_after_runtime_container_args.index('--k8s-namespace') + 1
-                    ]
-                == 'default-namespace'
+            uses_after_runtime_container_args[
+                uses_after_runtime_container_args.index('--k8s-namespace') + 1
+            ]
+            == 'default-namespace'
         )
         assert '--port' in uses_after_runtime_container_args
         assert (
-                uses_after_runtime_container_args[
-                    uses_after_runtime_container_args.index('--port') + 1
-                    ]
-                == '8079'
+            uses_after_runtime_container_args[
+                uses_after_runtime_container_args.index('--port') + 1
+            ]
+            == '8079'
        )
         assert '--env' not in uses_after_runtime_container_args
         assert '--connection-list' not in uses_after_runtime_container_args
@@ -754,7 +761,7 @@ def _mock_fetch(
         name = f'executor-{i}' if shards > 1 else 'executor'
         assert shard_name == name
         assert (
-                len(shard_configs) == 3
+            len(shard_configs) == 3
         )  # 3 configs per yaml (configmap, service and deployment)
         config_map = shard_configs[0]
         assert_config_map_config(
@@ -823,24 +830,24 @@ def _mock_fetch(
         assert '--native' in shard_container_runtime_container_args
         assert '--name' in shard_container_runtime_container_args
         assert (
-                shard_container_runtime_container_args[
-                    shard_container_runtime_container_args.index('--name') + 1
-                    ]
-                == name
+            shard_container_runtime_container_args[
+                shard_container_runtime_container_args.index('--name') + 1
+            ]
+            == name
         )
         assert '--k8s-namespace' in shard_container_runtime_container_args
         assert (
-                shard_container_runtime_container_args[
-                    shard_container_runtime_container_args.index('--k8s-namespace') + 1
-                    ]
-                == 'default-namespace'
+            shard_container_runtime_container_args[
+                shard_container_runtime_container_args.index('--k8s-namespace') + 1
+            ]
+            == 'default-namespace'
         )
         assert '--port' in shard_container_runtime_container_args
         assert (
-                shard_container_runtime_container_args[
-                    shard_container_runtime_container_args.index('--port') + 1
-                    ]
-                == '8080'
+            shard_container_runtime_container_args[
+                shard_container_runtime_container_args.index('--port') + 1
+            ]
+            == '8080'
         )
         assert '--env' not in shard_container_runtime_container_args
         assert '--connection-list' not in shard_container_runtime_container_args
@@ -848,10 +855,10 @@ def _mock_fetch(
         if uses_with is not None:
             assert '--uses-with' in shard_container_runtime_container_args
             assert (
-                    shard_container_runtime_container_args[
-                        shard_container_runtime_container_args.index('--uses-with') + 1
-                        ]
-                    == uses_with
+                shard_container_runtime_container_args[
+                    shard_container_runtime_container_args.index('--uses-with') + 1
+                ]
+                == uses_with
             )
         else:
             assert '--uses-with' not in shard_container_runtime_container_args
@@ -861,8 +868,8 @@ def _mock_fetch(
             expected_uses_metas = json.loads(uses_metas)
             assert '--uses-metas' in shard_container_runtime_container_args
             assert shard_container_runtime_container_args[
-                shard_container_runtime_container_args.index('--uses-metas') + 1
-                ] == json.dumps(expected_uses_metas)
+                shard_container_runtime_container_args.index('--uses-metas') + 1
+            ] == json.dumps(expected_uses_metas)
 
 
 def test_executor_with_volumes_stateful_set():
@@ -876,12 +883,12 @@ def test_executor_with_volumes_stateful_set():
     assert sset['kind'] == 'StatefulSet'
     assert 'volumeClaimTemplates' in list(sset['spec'].keys())
     assert (
-            sset['spec']['template']['spec']['containers'][0]['volumeMounts'][0]['name']
-            == 'executor-volume'
+        sset['spec']['template']['spec']['containers'][0]['volumeMounts'][0]['name']
+        == 'executor-volume'
     )
     assert (
-            sset['spec']['template']['spec']['containers'][0]['volumeMounts'][0][
-                'mountPath'
-                ]
-            == 'path/volumes'
+        sset['spec']['template']['spec']['containers'][0]['volumeMounts'][0][
+            'mountPath'
+        ]
+        == 'path/volumes'
     )
diff --git a/tests/unit/orchestrate/flow/flow-async/test_asyncflow.py b/tests/unit/orchestrate/flow/flow-async/test_asyncflow.py
index 5ccbc75dc4df7..df98e4cc14214 100644
--- a/tests/unit/orchestrate/flow/flow-async/test_asyncflow.py
+++ b/tests/unit/orchestrate/flow/flow-async/test_asyncflow.py
@@ -42,15 +42,15 @@ def documents(start_index, end_index):
 )
 @pytest.mark.parametrize('use_stream', [False, True])
 async def test_run_async_flow(
-        protocol, mocker, flow_cls, return_responses, return_class, use_stream
+    protocol, mocker, flow_cls, return_responses, return_class, use_stream
 ):
     r_val = mocker.Mock()
     with flow_cls(protocol=protocol, asyncio=True).add() as f:
         async for r in f.index(
-                from_ndarray(np.random.random([num_docs, 4])),
-                on_done=r_val,
-                return_responses=return_responses,
-                stream=use_stream
+            from_ndarray(np.random.random([num_docs, 4])),
+            on_done=r_val,
+            return_responses=return_responses,
+            stream=use_stream,
         ):
             assert isinstance(r, return_class)
     validate_callback(r_val, validate)
@@ -101,8 +101,8 @@ def foo(self, **kwargs):
 
 async def run_async_flow_5s(flow):
     async for r in flow.index(
-            from_ndarray(np.random.random([num_docs, 4])),
-            on_done=validate,
+        from_ndarray(np.random.random([num_docs, 4])),
+        on_done=validate,
     ):
         assert isinstance(r, DocumentArray)
 
@@ -157,7 +157,9 @@ async def test_run_async_flow_other_task_concurrent(protocol):
 @pytest.mark.parametrize('use_stream', [False, True])
 async def test_return_results_async_flow(protocol, flow_cls, use_stream):
     with flow_cls(protocol=protocol, asyncio=True).add() as f:
-        async for r in f.index(from_ndarray(np.random.random([10, 2])), stream=use_stream):
+        async for r in f.index(
+            from_ndarray(np.random.random([10, 2])), stream=use_stream
+        ):
             assert isinstance(r, DocumentArray)
 
@@ -184,5 +186,7 @@ def foo(self, parameters, **kwargs):
 @pytest.mark.parametrize('use_stream', [False, True])
 async def test_async_flow_empty_data(flow_cls, use_stream):
     with flow_cls(asyncio=True).add(uses=MyExec) as f:
-        async for r in f.post('/hello', parameters={'hello': 'world'}, stream=use_stream):
+        async for r in f.post(
+            '/hello', parameters={'hello': 'world'}, stream=use_stream
+        ):
             assert isinstance(r, DocumentArray)
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_flow.py b/tests/unit/orchestrate/flow/flow-construct/test_flow.py
index 7b5db8f22e495..8b6ba2ec3e4ac 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_flow.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_flow.py
@@ -73,7 +73,9 @@ def bytes_fn():
 
 @pytest.mark.slow
 def test_flow_identical(tmpdir):
-    with open(os.path.join(cur_dir, '../../../yaml/test-flow.yml'), encoding='utf-8') as fp:
+    with open(
+        os.path.join(cur_dir, '../../../yaml/test-flow.yml'), encoding='utf-8'
+    ) as fp:
         a = Flow.load_config(fp)
 
     b = (
@@ -657,7 +659,9 @@ def _validate_flow(f):
 def test_set_port_deployment(port_generator):
     port = port_generator()
     with Flow().add(uses=Executor, port=port) as f:
-        assert int(f._deployment_nodes['executor0'].pod_args['pods'][0][0].port[0]) == port
+        assert (
+            int(f._deployment_nodes['executor0'].pod_args['pods'][0][0].port[0]) == port
+        )
     f.index(inputs=[])
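When the overflowing statement is a bare assert, Black cannot split it directly, so it wraps the expression in otherwise redundant parentheses purely as a line-break vehicle, as in test_set_port_deployment above:

    # Before: a single line that exceeds the limit
    assert int(f._deployment_nodes['executor0'].pod_args['pods'][0][0].port[0]) == port

    # After: parentheses added only so the expression may be broken
    assert (
        int(f._deployment_nodes['executor0'].pod_args['pods'][0][0].port[0]) == port
    )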
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_flow_monitoring.py b/tests/unit/orchestrate/flow/flow-construct/test_flow_monitoring.py
index 6bd0f8ca80f31..18dcd0b6b07f5 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_flow_monitoring.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_flow_monitoring.py
@@ -7,8 +7,7 @@ def get_executor():
     class DummyExecutor(Executor):
         @requests(on='/foo')
-        def foo(self, docs, **kwargs):
-            ...
+        def foo(self, docs, **kwargs): ...
 
     return DummyExecutor
 
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_flow_multiprotocol.py b/tests/unit/orchestrate/flow/flow-construct/test_flow_multiprotocol.py
index adc87180f71b0..d702a9b8d260a 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_flow_multiprotocol.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_flow_multiprotocol.py
@@ -110,7 +110,9 @@ def test_flow_multiprotocol_yaml():
 
 def test_flow_multiprotocol_ports_protocols_mismatch():
-    flow = Flow().config_gateway(port=[random_port(), random_port()], protocol=['grpc', 'http', 'websocket'])
+    flow = Flow().config_gateway(
+        port=[random_port(), random_port()], protocol=['grpc', 'http', 'websocket']
+    )
     with pytest.raises(ValueError) as err_info:
         with flow:
             pass
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_flow_to_k8s_yaml.py b/tests/unit/orchestrate/flow/flow-construct/test_flow_to_k8s_yaml.py
index a335810e0ac40..d79f7cfb30efa 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_flow_to_k8s_yaml.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_flow_to_k8s_yaml.py
@@ -19,12 +19,16 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
     gateway_kwargs['port'] = flow_port
     gateway_kwargs['replicas'] = gateway_replicas
     gateway_kwargs['env_from_secret'] = {
-        'SECRET_GATEWAY_USERNAME': {'name': 'gateway_secret', 'key': 'gateway_username'},
+        'SECRET_GATEWAY_USERNAME': {
+            'name': 'gateway_secret',
+            'key': 'gateway_username',
+        },
     }
     gateway_kwargs['image_pull_secrets'] = ['secret1', 'secret2']
 
     flow = (
-        Flow(**flow_kwargs).config_gateway(**gateway_kwargs)
+        Flow(**flow_kwargs)
+        .config_gateway(**gateway_kwargs)
         .add(name='executor0', uses_with={'param': 0}, timeout_ready=60000)
         .add(
             name='executor1',
@@ -34,7 +38,7 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
                 'SECRET_USERNAME': {'name': 'mysecret', 'key': 'username'},
                 'SECRET_PASSWORD': {'name': 'mysecret', 'key': 'password'},
             },
-            image_pull_secrets=['secret3', 'secret4']
+            image_pull_secrets=['secret3', 'secret4'],
         )
         .add(
             name='executor2',
@@ -104,15 +108,22 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
     assert gateway_objects[2]['metadata']['namespace'] == namespace
     assert gateway_objects[2]['metadata']['name'] == 'gateway'
     assert gateway_objects[2]['spec']['replicas'] == gateway_replicas
-    assert gateway_objects[2]['spec']['template']['spec']['imagePullSecrets'] == [{'name': 'secret1'}, {'name': 'secret2'}]
+    assert gateway_objects[2]['spec']['template']['spec']['imagePullSecrets'] == [
+        {'name': 'secret1'},
+        {'name': 'secret2'},
+    ]
     gateway_args = gateway_objects[2]['spec']['template']['spec']['containers'][0][
         'args'
     ]
     assert gateway_args[0] == 'gateway'
     assert '--port' in gateway_args
-    assert gateway_args[gateway_args.index('--port') + 1] == str(GrpcConnectionPool.K8S_PORT)
-    assert gateway_args[gateway_args.index('--port') + 1] == str(GrpcConnectionPool.K8S_PORT)
+    assert gateway_args[gateway_args.index('--port') + 1] == str(
+        GrpcConnectionPool.K8S_PORT
+    )
+    assert gateway_args[gateway_args.index('--port') + 1] == str(
+        GrpcConnectionPool.K8S_PORT
+    )
     assert '--k8s-namespace' in gateway_args
     assert gateway_args[gateway_args.index('--k8s-namespace') + 1] == namespace
     assert '--graph-description' in gateway_args
@@ -143,7 +154,9 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
         },
         {
             'name': 'SECRET_GATEWAY_USERNAME',
-            'valueFrom': {'secretKeyRef': {'name': 'gateway_secret', 'key': 'gateway_username'}},
+            'valueFrom': {
+                'secretKeyRef': {'name': 'gateway_secret', 'key': 'gateway_username'}
+            },
         },
     ]
 
@@ -213,7 +226,9 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
     assert executor1_head0_objects[2]['metadata']['namespace'] == namespace
     assert executor1_head0_objects[2]['metadata']['name'] == 'executor1-head'
     assert executor1_head0_objects[2]['spec']['replicas'] == 1
-    assert executor1_head0_objects[2]['spec']['template']['spec']['imagePullSecrets'] == [{'name': 'secret3'}, {'name': 'secret4'}]
+    assert executor1_head0_objects[2]['spec']['template']['spec'][
+        'imagePullSecrets'
+    ] == [{'name': 'secret3'}, {'name': 'secret4'}]
     executor1_head0_args = executor1_head0_objects[2]['spec']['template']['spec'][
         'containers'
     ][0]['args']
@@ -282,7 +297,9 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
     assert executor1_shard0_objects[2]['metadata']['namespace'] == namespace
     assert executor1_shard0_objects[2]['metadata']['name'] == 'executor1-0'
     assert executor1_shard0_objects[2]['spec']['replicas'] == 1
-    assert executor1_shard0_objects[2]['spec']['template']['spec']['imagePullSecrets'] == [{'name': 'secret3'}, {'name': 'secret4'}]
+    assert executor1_shard0_objects[2]['spec']['template']['spec'][
+        'imagePullSecrets'
+    ] == [{'name': 'secret3'}, {'name': 'secret4'}]
     executor1_shard0_args = executor1_shard0_objects[2]['spec']['template']['spec'][
         'containers'
     ][0]['args']
@@ -345,7 +362,9 @@ def test_flow_to_k8s_yaml(tmpdir, protocol, flow_port, gateway_replicas):
     assert executor1_shard1_objects[2]['metadata']['namespace'] == namespace
     assert executor1_shard1_objects[2]['metadata']['name'] == 'executor1-1'
     assert executor1_shard1_objects[2]['spec']['replicas'] == 1
-    assert executor1_shard1_objects[2]['spec']['template']['spec']['imagePullSecrets'] == [{'name': 'secret3'}, {'name': 'secret4'}]
+    assert executor1_shard1_objects[2]['spec']['template']['spec'][
+        'imagePullSecrets'
+    ] == [{'name': 'secret3'}, {'name': 'secret4'}]
     executor1_shard1_args = executor1_shard1_objects[2]['spec']['template']['spec'][
         'containers'
     ][0]['args']
@@ -629,4 +648,3 @@ def test_raise_exception_invalid_executor(tmpdir):
     with pytest.raises(NoContainerizedError):
         f = Flow().add(uses='A')
         f.to_kubernetes_yaml(str(tmpdir))
-
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py b/tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py
index 3e59840800d5e..53d2b950783e1 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py
@@ -161,7 +161,9 @@ def test_load_flow_from_cli():
 
 def test_load_flow_from_yaml():
-    with open(cur_dir.parent.parent.parent / 'yaml' / 'test-flow.yml', encoding='utf-8') as fp:
+    with open(
+        cur_dir.parent.parent.parent / 'yaml' / 'test-flow.yml', encoding='utf-8'
+    ) as fp:
         _ = Flow.load_config(fp)
 
diff --git a/tests/unit/orchestrate/flow/flow-construct/test_slow_executor_shutdown.py b/tests/unit/orchestrate/flow/flow-construct/test_slow_executor_shutdown.py
index 237bfbb98b296..8584dc80e2b11 100644
--- a/tests/unit/orchestrate/flow/flow-construct/test_slow_executor_shutdown.py
+++ b/tests/unit/orchestrate/flow/flow-construct/test_slow_executor_shutdown.py
@@ -8,7 +8,9 @@
 
 class SlowExecutor(Executor):
     def close(self) -> None:
-        with open(os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8') as f:
+        with open(
+            os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8'
+        ) as f:
             time.sleep(10)
             f.write('x')
 
diff --git a/tests/unit/orchestrate/pods/container/test_container_pod.py b/tests/unit/orchestrate/pods/container/test_container_pod.py
index bac3b975883c7..0dcdc58b8a114 100644
--- a/tests/unit/orchestrate/pods/container/test_container_pod.py
+++ b/tests/unit/orchestrate/pods/container/test_container_pod.py
@@ -159,6 +159,7 @@ def test_pass_arbitrary_kwargs(monkeypatch, mocker):
     def _mock_is_ready(*args, **kwargs):
         return True
+
     monkeypatch.setattr(
         servers.BaseServer,
         'is_ready',
@@ -182,7 +183,7 @@ def test_pass_arbitrary_kwargs(monkeypatch, mocker):
         def reload(self):
             pass
 
         def stop(self, *args, **kwargs):
             pass
-        
+
         def __init__(self):
             pass
 
diff --git a/tests/unit/orchestrate/pods/test_pod_factory.py b/tests/unit/orchestrate/pods/test_pod_factory.py
index 41efda2dae6e1..391dfc75ddd83 100644
--- a/tests/unit/orchestrate/pods/test_pod_factory.py
+++ b/tests/unit/orchestrate/pods/test_pod_factory.py
@@ -5,9 +5,7 @@
 from jina.parsers import set_pod_parser
 
 
-@pytest.mark.parametrize(
-    'uses', ['jinaai+docker://jina-ai/DummyExecutor']
-)
+@pytest.mark.parametrize('uses', ['jinaai+docker://jina-ai/DummyExecutor'])
 def test_container_pod(mocker, monkeypatch, uses):
     mock = mocker.Mock()
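Several hunks in these files are pure whitespace hygiene rather than line-splitting: trailing spaces are stripped, and a blank line is enforced before a nested function definition and after its body (visible as the `+`-only and `-`-only lines above and in the batch-queue tests below). Schematically, with hypothetical names:

    # Before: the nested def butts up against the surrounding statements
    def test_queue():
        batches = []
        async def foo(docs, **kwargs):
            batches.append(len(docs))
        init_time = time.time()

    # After: one blank line on each side of the nested function
    def test_queue():
        batches = []

        async def foo(docs, **kwargs):
            batches.append(len(docs))

        init_time = time.time()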
diff --git a/tests/unit/serve/dynamic_batching/test_batch_queue.py b/tests/unit/serve/dynamic_batching/test_batch_queue.py
index ad968c3d9fd55..22758995d7270 100644
--- a/tests/unit/serve/dynamic_batching/test_batch_queue.py
+++ b/tests/unit/serve/dynamic_batching/test_batch_queue.py
@@ -61,6 +61,7 @@ async def process_request(req):
 @pytest.mark.asyncio
 async def test_batch_queue_timeout_does_not_wait_previous_batch():
     batches_lengths_computed = []
+
     async def foo(docs, **kwargs):
         await asyncio.sleep(4)
         batches_lengths_computed.append(len(docs))
@@ -88,6 +89,7 @@ async def process_request(req, sleep=0):
             _ = await q.get()
             q.task_done()
         return req
+
     init_time = time.time()
     tasks = [asyncio.create_task(process_request(req)) for req in data_requests]
     tasks.append(asyncio.create_task(process_request(extra_data_request, sleep=2)))
@@ -161,7 +163,9 @@ async def foo(docs, **kwargs):
 
     data_requests = [DataRequest() for _ in range(35)]
     for i, req in enumerate(data_requests):
-        req.data.docs = DocumentArray(Document(text=f'{i}' if i not in BAD_REQUEST_IDX else 'Bad'))
+        req.data.docs = DocumentArray(
+            Document(text=f'{i}' if i not in BAD_REQUEST_IDX else 'Bad')
+        )
 
     async def process_request(req):
         q = await bq.push(req)
@@ -209,7 +213,9 @@ async def foo(docs, **kwargs):
 
     data_requests = [DataRequest() for _ in range(35)]
     for i, req in enumerate(data_requests):
-        req.data.docs = DocumentArray(Document(text='' if i not in TRIGGER_BAD_REQUEST_IDX else 'Bad'))
+        req.data.docs = DocumentArray(
+            Document(text='' if i not in TRIGGER_BAD_REQUEST_IDX else 'Bad')
+        )
 
     async def process_request(req):
         q = await bq.push(req)
@@ -303,7 +309,12 @@ async def foo(docs, **kwargs):
     for i, req in enumerate(data_requests):
         len_request = random.randint(2, 27)
         len_requests.append(len_request)
-        req.data.docs = DocumentArray([Document(text=f'Text {j} from request {i} with len {len_request}') for j in range(len_request)])
+        req.data.docs = DocumentArray(
+            [
+                Document(text=f'Text {j} from request {i} with len {len_request}')
+                for j in range(len_request)
+            ]
+        )
 
     async def process_request(req):
         q = await bq.push(req)
diff --git a/tests/unit/serve/executors/test_executor.py b/tests/unit/serve/executors/test_executor.py
index ec3fd4c8b9714..3bb6f1769ceff 100644
--- a/tests/unit/serve/executors/test_executor.py
+++ b/tests/unit/serve/executors/test_executor.py
@@ -306,7 +306,9 @@ def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
     def do(self, *args, **kwargs):
-        with open(os.path.join(self.workspace, 'text.txt'), 'w', encoding='utf-8') as f:
+        with open(
+            os.path.join(self.workspace, 'text.txt'), 'w', encoding='utf-8'
+        ) as f:
             f.write('here!')
 
     e = MyExec(metas={'workspace': tmpdir})
@@ -451,7 +453,9 @@ def test_to_k8s_yaml(tmpdir, exec_type, uses):
         executor_type=exec_type,
     )
 
-    with open(os.path.join(tmpdir, 'executor0', 'executor0.yml'), encoding='utf-8') as f:
+    with open(
+        os.path.join(tmpdir, 'executor0', 'executor0.yml'), encoding='utf-8'
+    ) as f:
         exec_yaml = list(yaml.safe_load_all(f))[-1]
         assert exec_yaml['spec']['template']['spec']['containers'][0][
             'image'
@@ -467,7 +471,9 @@ def test_to_k8s_yaml(tmpdir, exec_type, uses):
         'gateway',
     }
 
-    with open(os.path.join(tmpdir, 'gateway', 'gateway.yml'), encoding='utf-8') as f:
+    with open(
+        os.path.join(tmpdir, 'gateway', 'gateway.yml'), encoding='utf-8'
+    ) as f:
         gatewayyaml = list(yaml.safe_load_all(f))[-1]
         assert (
             gatewayyaml['spec']['template']['spec']['containers'][0]['ports'][0][
@@ -533,7 +539,9 @@ def foo(self, docs: DocumentArray, **kwargs):
     cancel_event = multiprocessing.Event()
 
     def start_runtime(args, cancel_event):
-        with AsyncNewLoopRuntime(args, cancel_event=cancel_event, req_handler_cls=WorkerRequestHandler) as runtime:
+        with AsyncNewLoopRuntime(
+            args, cancel_event=cancel_event, req_handler_cls=WorkerRequestHandler
+        ) as runtime:
             runtime.run_forever()
 
     runtime_thread = Process(
@@ -687,8 +695,6 @@ def index(self, **kwargs):
     def update(self, **kwargs):
         pass
 
-
-
     @requests(on='/search')
     def search(self, **kwargs):
         pass
diff --git a/tests/unit/serve/gateway/test_gateway.py b/tests/unit/serve/gateway/test_gateway.py
index 0d84832c6ab88..c0759f1624291 100644
--- a/tests/unit/serve/gateway/test_gateway.py
+++ b/tests/unit/serve/gateway/test_gateway.py
@@ -31,22 +31,23 @@ def _create_gateway_runtime(port, uses, uses_with, worker_port):
     pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
     deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
     with AsyncNewLoopRuntime(
-            set_gateway_parser().parse_args(
-                [
-                    '--port',
-                    str(port),
-                    '--uses',
-                    uses,
-                    '--uses-with',
-                    json.dumps(uses_with),
-                    '--graph-description',
-                    graph_description,
-                    '--deployments-addresses',
-                    pod_addresses,
-                    '--deployments-metadata',
-                    deployments_metadata,
-                ]
-            ), req_handler_cls=GatewayRequestHandler
+        set_gateway_parser().parse_args(
+            [
+                '--port',
+                str(port),
+                '--uses',
+                uses,
+                '--uses-with',
+                json.dumps(uses_with),
+                '--graph-description',
+                graph_description,
+                '--deployments-addresses',
+                pod_addresses,
+                '--deployments-metadata',
+                deployments_metadata,
+            ]
+        ),
+        req_handler_cls=GatewayRequestHandler,
     ) as runtime:
         runtime.run_forever()
 
@@ -89,59 +90,59 @@ def _start_worker_runtime(uses):
     [
         ('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
         (
-                'DummyGatewayGetStreamer',
-                {},
-                {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
+            'DummyGatewayGetStreamer',
+            {},
+            {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
         ),
         (
-                _dummy_gateway_yaml_path,
-                {},
-                {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
+
+            _dummy_gateway_yaml_path,
+            {},
+            {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
         ),
         (
-                _dummy_fastapi_gateway_yaml_path,
-                {},
-                {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
+            _dummy_fastapi_gateway_yaml_path,
+            {},
+            {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
         ),
         (
-                'DummyGateway',
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            'DummyGateway',
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
         ),
         (
-                'DummyGatewayGetStreamer',
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            'DummyGatewayGetStreamer',
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
        ),
         (
-                _dummy_gateway_yaml_path,
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            _dummy_gateway_yaml_path,
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
         ),
         (
-                _dummy_fastapi_gateway_yaml_path,
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
-                {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            _dummy_fastapi_gateway_yaml_path,
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
+            {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
         ),
         (
-                'DummyGateway',
-                {'arg1': 'arg1'},
-                {'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
+            'DummyGateway',
+            {'arg1': 'arg1'},
+            {'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
         ),
         (
-                'DummyGatewayGetStreamer',
-                {'arg1': 'arg1'},
-                {'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
+            'DummyGatewayGetStreamer',
+            {'arg1': 'arg1'},
+            {'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
         ),
         (
-                _dummy_gateway_yaml_path,
-                {'arg1': 'arg1'},
-                {'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
+            _dummy_gateway_yaml_path,
+            {'arg1': 'arg1'},
+            {'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
         ),
         (
-                _dummy_fastapi_gateway_yaml_path,
-                {'arg1': 'arg1'},
-                {'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
+            _dummy_fastapi_gateway_yaml_path,
+            {'arg1': 'arg1'},
+            {'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
         ),
     ],
 )
@@ -178,8 +179,13 @@ def app(self):
 
         @app.get('/endpoint')
         async def get(text: str):
-            docs = await self.executor['executor1'].post(on='/', inputs=DocumentArray(
-                [Document(text=text), Document(text=text.upper())]), parameters=PARAMETERS)
+            docs = await self.executor['executor1'].post(
+                on='/',
+                inputs=DocumentArray(
+                    [Document(text=text), Document(text=text.upper())]
+                ),
+                parameters=PARAMETERS,
+            )
             return {'result': docs.texts}
 
         return app
@@ -196,12 +202,16 @@ def func(self, docs, parameters, **kwargs):
         for doc in docs:
             doc.text += f' Second(parameters={str(parameters)})'
 
-    with Flow().config_gateway(uses=MyGateway, protocol='http').add(uses=FirstExec, name='executor0').add(
-            uses=SecondExec, name='executor1') as flow:
+    with Flow().config_gateway(uses=MyGateway, protocol='http').add(
+        uses=FirstExec, name='executor0'
+    ).add(uses=SecondExec, name='executor1') as flow:
         import requests
+
         r = requests.get(f'http://localhost:{flow.port}/endpoint?text=meow')
-        assert r.json()['result'] == [f'meow Second(parameters={str(PARAMETERS)})',
-                                      f'MEOW Second(parameters={str(PARAMETERS)})']
+        assert r.json()['result'] == [
+            f'meow Second(parameters={str(PARAMETERS)})',
+            f'MEOW Second(parameters={str(PARAMETERS)})',
+        ]
@@ -210,7 +220,7 @@ def func(self, docs, parameters, **kwargs):
         (2, 1),
         (1, 2),
         (2, 2),
-    ]
+    ],
 )
 def test_stream_individual_executor_multirequest(n_replicas: int, n_shards: int):
     N_DOCS: int = 100
@@ -233,9 +243,14 @@ def app(self):
 
         @app.get('/endpoint')
         async def get(text: str):
-            docs = await self.executor['executor1'].post(on='/', inputs=DocumentArray(
-                [Document(text=f'{text} {i}') for i in range(N_DOCS)]), parameters=PARAMETERS,
-                request_size=BATCH_SIZE)
+            docs = await self.executor['executor1'].post(
+                on='/',
+                inputs=DocumentArray(
+                    [Document(text=f'{text} {i}') for i in range(N_DOCS)]
+                ),
+                parameters=PARAMETERS,
+                request_size=BATCH_SIZE,
+            )
             pids = set([doc.tags['pid'] for doc in docs])
             return {'result': docs.texts, 'pids': pids}
 
@@ -254,13 +269,18 @@ def func(self, docs, parameters, **kwargs):
             doc.text += f' Second(parameters={str(parameters)})'
             doc.tags['pid'] = os.getpid()
 
-    with Flow().config_gateway(uses=MyGateway, protocol='http').add(uses=FirstExec, name='executor0').add(
-            uses=SecondExec, name='executor1', replicas=n_replicas, shards=n_shards
+    with Flow().config_gateway(uses=MyGateway, protocol='http').add(
+        uses=FirstExec, name='executor0'
+    ).add(
+        uses=SecondExec, name='executor1', replicas=n_replicas, shards=n_shards
     ) as flow:
         import requests
+
         r = requests.get(f'http://localhost:{flow.port}/endpoint?text=meow')
         # Make sure the results are correct
-        assert set(r.json()['result']) == set([f'meow {i} Second(parameters={str(PARAMETERS)})' for i in range(N_DOCS)])
+        assert set(r.json()['result']) == set(
+            [f'meow {i} Second(parameters={str(PARAMETERS)})' for i in range(N_DOCS)]
+        )
         # Make sure we are sending to all replicas and shards
         assert len(r.json()['pids']) == n_replicas * n_shards
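Chained calls such as the `Flow().config_gateway(...).add(...).add(...)` constructions above get no dot-aligned treatment: Black only ever breaks inside bracket pairs, so the split lands within whichever argument list lets the line fit, not before the dots. A rough sketch of the resulting shape (names as in the tests above):

    # Before: the whole chain wrapped by hand at arbitrary points
    with Flow().config_gateway(uses=MyGateway, protocol='http').add(uses=FirstExec, name='executor0').add(uses=SecondExec, name='executor1') as flow:
        ...

    # After: Black breaks inside the .add() argument lists, never at the dots
    with Flow().config_gateway(uses=MyGateway, protocol='http').add(
        uses=FirstExec, name='executor0'
    ).add(uses=SecondExec, name='executor1') as flow:
        ...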
diff --git a/tests/unit/serve/runtimes/gateway/graph/test_topology_graph.py b/tests/unit/serve/runtimes/gateway/graph/test_topology_graph.py
index 9d67d8e3e8d61..309a5e06ce2d7 100644
--- a/tests/unit/serve/runtimes/gateway/graph/test_topology_graph.py
+++ b/tests/unit/serve/runtimes/gateway/graph/test_topology_graph.py
@@ -603,7 +603,9 @@ async def receive_from_client(self, client_id, msg: 'DataRequest'):
         tasks_to_respond = []
         tasks_to_ignore = []
         for origin_node in graph.origin_nodes:
-            leaf_tasks = origin_node.get_leaf_req_response_tasks(self.connection_pool, msg, None)
+            leaf_tasks = origin_node.get_leaf_req_response_tasks(
+                self.connection_pool, msg, None
+            )
             # Every origin node returns a set of tasks that are the ones corresponding to the leafs of each of their subtrees that unwrap all the previous tasks.
             # It starts like a chain of waiting for tasks from previous nodes
             tasks_to_respond.extend([task for ret, task in leaf_tasks if ret])
diff --git a/tests/unit/serve/runtimes/test_helper.py b/tests/unit/serve/runtimes/test_helper.py
index 43abba91f2157..323d0cdbc8b02 100644
--- a/tests/unit/serve/runtimes/test_helper.py
+++ b/tests/unit/serve/runtimes/test_helper.py
@@ -44,15 +44,15 @@ def test_split_key_executor_name(full_key, key, executor):
     'param, parsed_param, executor_name',
     [
         (
-                {'key': 1, 'executor__key': 2, 'wrong_executor__key': 3},
-                {'key': 2},
-                'executor',
+            {'key': 1, 'executor__key': 2, 'wrong_executor__key': 3},
+            {'key': 2},
+            'executor',
         ),
         ({'executor__key': 2, 'wrong_executor__key': 3}, {'key': 2}, 'executor'),
         (
-                {'a': 1, 'executor__key': 2, 'wrong_executor__key': 3},
-                {'key': 2, 'a': 1},
-                'executor',
+            {'a': 1, 'executor__key': 2, 'wrong_executor__key': 3},
+            {'key': 2, 'a': 1},
+            'executor',
         ),
         ({'key_1': 0, 'exec2__key_2': 1}, {'key_1': 0}, 'executor'),
     ],
@@ -69,8 +69,8 @@ def test_get_name_from_replicas(name_w_replicas, name):
 
 def _custom_grpc_options(
-        call_recording_mock: Mock,
-        additional_options: Optional[Union[list, Dict[str, Any]]] = None,
+    call_recording_mock: Mock,
+    additional_options: Optional[Union[list, Dict[str, Any]]] = None,
 ) -> List[Tuple[str, Any]]:
     call_recording_mock()
     expected_grpc_option_keys = [
@@ -375,10 +375,14 @@ class SearchResult(BaseDoc):
     textlist = DocList[MyTextDoc]([MyTextDoc(text='hey')])
     models_created_by_name = {}
     SearchResult_aux = _create_aux_model_doc_list_to_list(SearchResult)
-    _ = _create_pydantic_model_from_schema(SearchResult_aux.schema(), 'SearchResult',
-                                           models_created_by_name)
-    QuoteFile_reconstructed_in_gateway_from_Search_results = models_created_by_name['QuoteFile']
+    _ = _create_pydantic_model_from_schema(
+        SearchResult_aux.schema(), 'SearchResult', models_created_by_name
+    )
+    QuoteFile_reconstructed_in_gateway_from_Search_results = models_created_by_name[
+        'QuoteFile'
+    ]
 
-    reconstructed_in_gateway_from_Search_results = QuoteFile_reconstructed_in_gateway_from_Search_results(
-        texts=textlist)
+    reconstructed_in_gateway_from_Search_results = (
+        QuoteFile_reconstructed_in_gateway_from_Search_results(texts=textlist)
+    )
     assert reconstructed_in_gateway_from_Search_results.texts[0].text == 'hey'
diff --git a/tests/unit/serve/runtimes/worker/test_worker_runtime.py b/tests/unit/serve/runtimes/worker/test_worker_runtime.py
index 0e5d7bbe83dbc..8130a554a7495 100644
--- a/tests/unit/serve/runtimes/worker/test_worker_runtime.py
+++ b/tests/unit/serve/runtimes/worker/test_worker_runtime.py
@@ -313,12 +313,10 @@ def foo(self, docs, **kwargs):
             self.process_2(docs)
 
         @monitor(name='metrics_name', documentation='metrics description')
-        def _proces(self, docs):
-            ...
+        def _proces(self, docs): ...
 
         @monitor()
-        def process_2(self, docs):
-            ...
+        def process_2(self, docs): ...
 
     port = port_generator()
     args = _generate_pod_args(
@@ -377,11 +375,9 @@ def foo(self, docs, **kwargs):
         ):
             self.process_2(docs)
 
-        def _proces(self, docs):
-            ...
+        def _proces(self, docs): ...
 
-        def process_2(self, docs):
-            ...
+        def process_2(self, docs): ...
 
     port = port_generator()
     args = _generate_pod_args(
diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py
index ee03ebd32b39d..ccacd334378cc 100644
--- a/tests/unit/test_cli.py
+++ b/tests/unit/test_cli.py
@@ -40,9 +40,7 @@ def test_cli_help():
     subprocess.check_call(['jina', 'help', 'deployment'])
 
 
-@pytest.mark.parametrize(
-    'uses', ['jinaai://jina-ai/DummyHubExecutor']
-)
+@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyHubExecutor'])
 def test_cli_hub(uses):
     subprocess.check_call(['jina', 'hub', '--help'])
     for cmd in ['new', 'status', 'pull', 'push']:
diff --git a/tests/unit/test_helper.py b/tests/unit/test_helper.py
index 778d28aa35a2a..7833c36a3e3c4 100644
--- a/tests/unit/test_helper.py
+++ b/tests/unit/test_helper.py
@@ -386,8 +386,10 @@ def yield_generator_func():
     yield 1
     yield 2
 
+
 async def async_yield_generator_func():
     import asyncio
+
     for _ in range(10):
         # no yield
         pass
@@ -404,11 +406,13 @@ def normal_func():
 
 async def async_normal_func():
     import asyncio
+
     await asyncio.sleep(0.5)
     for _ in range(10):
         # no yield
         pass
 
+
 def yield_from_generator_func():
     for _ in range(10):
         # no yield