From 63e29a1cb607c86132a27d9261a5a04e558b6043 Mon Sep 17 00:00:00 2001
From: Yubo Wang
Date: Tue, 28 Mar 2023 13:21:11 -0700
Subject: [PATCH] add back the original protos for backward compatibility

Signed-off-by: Yubo Wang
---
 .../flyteidl/plugins/kubeflow/common.pb.cc    |  40 +-
 .../flyteidl/plugins/kubeflow/common.pb.h     |  35 +-
 .../flyteidl/plugins/kubeflow/mpi.pb.cc       | 258 +++---
 gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.h | 163 ++--
 .../flyteidl/plugins/kubeflow/pytorch.pb.cc   | 258 +++---
 .../flyteidl/plugins/kubeflow/pytorch.pb.h    | 163 ++--
 .../plugins/kubeflow/tensorflow.pb.cc         | 262 ++++---
 .../flyteidl/plugins/kubeflow/tensorflow.pb.h | 163 ++--
 gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.cc    |  24 +
 gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.h     |  47 ++
 gen/pb-cpp/flyteidl/plugins/mpi.pb.cc         | 461 +++++++++
 gen/pb-cpp/flyteidl/plugins/mpi.pb.h          | 257 ++++++
 .../flyteidl/plugins/pytorch.grpc.pb.cc       |  24 +
 gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.h |  47 ++
 gen/pb-cpp/flyteidl/plugins/pytorch.pb.cc     | 368 +++++++++
 gen/pb-cpp/flyteidl/plugins/pytorch.pb.h      | 215 +++++
 .../flyteidl/plugins/tensorflow.grpc.pb.cc    |  24 +
 .../flyteidl/plugins/tensorflow.grpc.pb.h     |  47 ++
 gen/pb-cpp/flyteidl/plugins/tensorflow.pb.cc  | 461 +++++++++
 gen/pb-cpp/flyteidl/plugins/tensorflow.pb.h   | 257 ++++++
 .../flyteidl/plugins/kubeflow/common.pb.go    | 115 +--
 gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.go |  86 +-
 .../plugins/kubeflow/mpi.pb.validate.go       |  14 +-
 .../flyteidl/plugins/kubeflow/pytorch.pb.go   |  88 +--
 .../plugins/kubeflow/pytorch.pb.validate.go   |  14 +-
 .../plugins/kubeflow/tensorflow.pb.go         |  90 +--
 .../kubeflow/tensorflow.pb.validate.go        |  14 +-
 gen/pb-go/flyteidl/plugins/mpi.pb.go          | 107 +++
 gen/pb-go/flyteidl/plugins/mpi.pb.validate.go | 110 +++
 gen/pb-go/flyteidl/plugins/pytorch.pb.go      |  82 ++
 .../flyteidl/plugins/pytorch.pb.validate.go   | 107 +++
 gen/pb-go/flyteidl/plugins/tensorflow.pb.go   | 102 +++
 .../plugins/tensorflow.pb.validate.go         | 111 +++
 gen/pb-java/flyteidl/plugins/Mpi.java         | 737 ++++++++++++++++++
 gen/pb-java/flyteidl/plugins/Pytorch.java     | 560 +++++++++++++
 gen/pb-java/flyteidl/plugins/Tensorflow.java  | 705 +++++++++++++++++
 .../flyteidl/plugins/kubeflow/Common.java     | 228 ++----
 .../flyteidl/plugins/kubeflow/Mpi.java        | 595 ++++++++------
 .../flyteidl/plugins/kubeflow/Pytorch.java    | 595 ++++++++------
 .../flyteidl/plugins/kubeflow/Tensorflow.java | 599 ++++++++------
 .../flyteidl/plugins/kubeflow/common_pb2.py   |  12 +-
 .../flyteidl/plugins/kubeflow/common_pb2.pyi  |   6 -
 .../flyteidl/plugins/kubeflow/mpi_pb2.py      |  11 +-
 .../flyteidl/plugins/kubeflow/mpi_pb2.pyi     |  17 +-
 .../flyteidl/plugins/kubeflow/pytorch_pb2.py  |  11 +-
 .../flyteidl/plugins/kubeflow/pytorch_pb2.pyi |  17 +-
 .../plugins/kubeflow/tensorflow_pb2.py        |  11 +-
 .../plugins/kubeflow/tensorflow_pb2.pyi       |  17 +-
 gen/pb_python/flyteidl/plugins/mpi_pb2.py     |  26 +
 gen/pb_python/flyteidl/plugins/mpi_pb2.pyi    |  15 +
 .../flyteidl/plugins/mpi_pb2_grpc.py          |   4 +
 gen/pb_python/flyteidl/plugins/pytorch_pb2.py |  26 +
 .../flyteidl/plugins/pytorch_pb2.pyi          |  11 +
 .../flyteidl/plugins/pytorch_pb2_grpc.py      |   4 +
 .../flyteidl/plugins/tensorflow_pb2.py        |  26 +
 .../flyteidl/plugins/tensorflow_pb2.pyi       |  15 +
 .../flyteidl/plugins/tensorflow_pb2_grpc.py   |   4 +
 protos/flyteidl/plugins/kubeflow/common.proto |  26 +-
 protos/flyteidl/plugins/kubeflow/mpi.proto    |  18 +-
 .../flyteidl/plugins/kubeflow/pytorch.proto   |  18 +-
 .../plugins/kubeflow/tensorflow.proto         |  18 +-
 protos/flyteidl/plugins/mpi.proto             |  20 +
 protos/flyteidl/plugins/pytorch.proto         |  11 +
protos/flyteidl/plugins/tensorflow.proto | 14 + 64 files changed, 7202 insertions(+), 1789 deletions(-) create mode 100644 gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.h create mode 100644 gen/pb-cpp/flyteidl/plugins/mpi.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/mpi.pb.h create mode 100644 gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.h create mode 100644 gen/pb-cpp/flyteidl/plugins/pytorch.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/pytorch.pb.h create mode 100644 gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.h create mode 100644 gen/pb-cpp/flyteidl/plugins/tensorflow.pb.cc create mode 100644 gen/pb-cpp/flyteidl/plugins/tensorflow.pb.h create mode 100644 gen/pb-go/flyteidl/plugins/mpi.pb.go create mode 100644 gen/pb-go/flyteidl/plugins/mpi.pb.validate.go create mode 100644 gen/pb-go/flyteidl/plugins/pytorch.pb.go create mode 100644 gen/pb-go/flyteidl/plugins/pytorch.pb.validate.go create mode 100644 gen/pb-go/flyteidl/plugins/tensorflow.pb.go create mode 100644 gen/pb-go/flyteidl/plugins/tensorflow.pb.validate.go create mode 100644 gen/pb-java/flyteidl/plugins/Mpi.java create mode 100644 gen/pb-java/flyteidl/plugins/Pytorch.java create mode 100644 gen/pb-java/flyteidl/plugins/Tensorflow.java create mode 100644 gen/pb_python/flyteidl/plugins/mpi_pb2.py create mode 100644 gen/pb_python/flyteidl/plugins/mpi_pb2.pyi create mode 100644 gen/pb_python/flyteidl/plugins/mpi_pb2_grpc.py create mode 100644 gen/pb_python/flyteidl/plugins/pytorch_pb2.py create mode 100644 gen/pb_python/flyteidl/plugins/pytorch_pb2.pyi create mode 100644 gen/pb_python/flyteidl/plugins/pytorch_pb2_grpc.py create mode 100644 gen/pb_python/flyteidl/plugins/tensorflow_pb2.py create mode 100644 gen/pb_python/flyteidl/plugins/tensorflow_pb2.pyi create mode 100644 gen/pb_python/flyteidl/plugins/tensorflow_pb2_grpc.py create mode 100644 protos/flyteidl/plugins/mpi.proto create mode 100644 protos/flyteidl/plugins/pytorch.proto create mode 100644 protos/flyteidl/plugins/tensorflow.proto diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.cc b/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.cc index 6e2d39409..8be88cd80 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.cc +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.cc @@ -45,7 +45,7 @@ void InitDefaults_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto() { } ::google::protobuf::Metadata file_level_metadata_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[1]; -const ::google::protobuf::EnumDescriptor* file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[3]; +const ::google::protobuf::EnumDescriptor* file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[2]; constexpr ::google::protobuf::ServiceDescriptor const** file_level_service_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto = nullptr; const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { @@ -80,21 +80,18 @@ const char descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fcommon_2epr "lugins.kubeflow.CleanPodPolicy\022\"\n\032ttl_se" "conds_after_finished\030\002 \001(\005\022\035\n\025activeDead" "lineSeconds\030\003 \001(\005\022\025\n\rbackoff_limit\030\004 \001(\005" - "*K\n\rSuccessPolicy\022\032\n\026SUCCESS_POLICY_DEFA" - 
"ULT\020\000\022\036\n\032SUCCESS_POLICY_ALL_WORKERS\020\001*\177\n" - "\016CleanPodPolicy\022\035\n\031CLEANPOD_POLICY_UNDEF" - "INED\020\000\022\027\n\023CLEANPOD_POLICY_ALL\020\001\022\033\n\027CLEAN" - "POD_POLICY_RUNNING\020\002\022\030\n\024CLEANPOD_POLICY_" - "NONE\020\003*c\n\rRestartPolicy\022\031\n\025RESTART_POLIC" - "Y_ALWAYS\020\000\022\035\n\031RESTART_POLICY_ON_FAILURE\020" - "\001\022\030\n\024RESTART_POLICY_NEVER\020\002B9Z7github.co" - "m/flyteorg/flyteidl/gen/pb-go/flyteidl/p" - "luginsb\006proto3" + "*`\n\016CleanPodPolicy\022\027\n\023CLEANPOD_POLICY_AL" + "L\020\000\022\033\n\027CLEANPOD_POLICY_RUNNING\020\001\022\030\n\024CLEA" + "NPOD_POLICY_NONE\020\002*c\n\rRestartPolicy\022\031\n\025R" + "ESTART_POLICY_ALWAYS\020\000\022\035\n\031RESTART_POLICY" + "_ON_FAILURE\020\001\022\030\n\024RESTART_POLICY_NEVER\020\002B" + "9Z7github.com/flyteorg/flyteidl/gen/pb-g" + "o/flyteidl/pluginsb\006proto3" ; ::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto = { false, InitDefaults_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, - "flyteidl/plugins/kubeflow/common.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, 614, + "flyteidl/plugins/kubeflow/common.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, 506, }; void AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto() { @@ -109,30 +106,15 @@ static bool dynamic_init_dummy_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto = namespace flyteidl { namespace plugins { namespace kubeflow { -const ::google::protobuf::EnumDescriptor* SuccessPolicy_descriptor() { - ::google::protobuf::internal::AssignDescriptors(&assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto); - return file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[0]; -} -bool SuccessPolicy_IsValid(int value) { - switch (value) { - case 0: - case 1: - return true; - default: - return false; - } -} - const ::google::protobuf::EnumDescriptor* CleanPodPolicy_descriptor() { ::google::protobuf::internal::AssignDescriptors(&assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto); - return file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[1]; + return file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[0]; } bool CleanPodPolicy_IsValid(int value) { switch (value) { case 0: case 1: case 2: - case 3: return true; default: return false; @@ -141,7 +123,7 @@ bool CleanPodPolicy_IsValid(int value) { const ::google::protobuf::EnumDescriptor* RestartPolicy_descriptor() { ::google::protobuf::internal::AssignDescriptors(&assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto); - return file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[2]; + return file_level_enum_descriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto[1]; } bool RestartPolicy_IsValid(int value) { switch (value) { diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.h b/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.h index 4c186d6f8..51cc2b341 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.h +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/common.pb.h @@ -67,37 +67,15 @@ namespace flyteidl { namespace plugins { namespace kubeflow { -enum SuccessPolicy { - SUCCESS_POLICY_DEFAULT = 0, - SUCCESS_POLICY_ALL_WORKERS = 1, - SuccessPolicy_INT_MIN_SENTINEL_DO_NOT_USE_ = 
std::numeric_limits<::google::protobuf::int32>::min(), - SuccessPolicy_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::google::protobuf::int32>::max() -}; -bool SuccessPolicy_IsValid(int value); -const SuccessPolicy SuccessPolicy_MIN = SUCCESS_POLICY_DEFAULT; -const SuccessPolicy SuccessPolicy_MAX = SUCCESS_POLICY_ALL_WORKERS; -const int SuccessPolicy_ARRAYSIZE = SuccessPolicy_MAX + 1; - -const ::google::protobuf::EnumDescriptor* SuccessPolicy_descriptor(); -inline const ::std::string& SuccessPolicy_Name(SuccessPolicy value) { - return ::google::protobuf::internal::NameOfEnum( - SuccessPolicy_descriptor(), value); -} -inline bool SuccessPolicy_Parse( - const ::std::string& name, SuccessPolicy* value) { - return ::google::protobuf::internal::ParseNamedEnum( - SuccessPolicy_descriptor(), name, value); -} enum CleanPodPolicy { - CLEANPOD_POLICY_UNDEFINED = 0, - CLEANPOD_POLICY_ALL = 1, - CLEANPOD_POLICY_RUNNING = 2, - CLEANPOD_POLICY_NONE = 3, + CLEANPOD_POLICY_ALL = 0, + CLEANPOD_POLICY_RUNNING = 1, + CLEANPOD_POLICY_NONE = 2, CleanPodPolicy_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::google::protobuf::int32>::min(), CleanPodPolicy_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::google::protobuf::int32>::max() }; bool CleanPodPolicy_IsValid(int value); -const CleanPodPolicy CleanPodPolicy_MIN = CLEANPOD_POLICY_UNDEFINED; +const CleanPodPolicy CleanPodPolicy_MIN = CLEANPOD_POLICY_ALL; const CleanPodPolicy CleanPodPolicy_MAX = CLEANPOD_POLICY_NONE; const int CleanPodPolicy_ARRAYSIZE = CleanPodPolicy_MAX + 1; @@ -346,11 +324,6 @@ inline void RunPolicy::set_backoff_limit(::google::protobuf::int32 value) { namespace google { namespace protobuf { -template <> struct is_proto_enum< ::flyteidl::plugins::kubeflow::SuccessPolicy> : ::std::true_type {}; -template <> -inline const EnumDescriptor* GetEnumDescriptor< ::flyteidl::plugins::kubeflow::SuccessPolicy>() { - return ::flyteidl::plugins::kubeflow::SuccessPolicy_descriptor(); -} template <> struct is_proto_enum< ::flyteidl::plugins::kubeflow::CleanPodPolicy> : ::std::true_type {}; template <> inline const EnumDescriptor* GetEnumDescriptor< ::flyteidl::plugins::kubeflow::CleanPodPolicy>() { diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.cc b/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.cc index a48ad8335..1306a371e 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.cc +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.cc @@ -16,8 +16,9 @@ // @@protoc_insertion_point(includes) #include +extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fcore_2ftasks_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto; extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_RunPolicy_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto; -extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_DistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto; +extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_DistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto; namespace flyteidl { namespace plugins { namespace kubeflow { @@ -59,8 +60,9 @@ static void InitDefaultsDistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fk ::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec::InitAsDefaultInstance(); } -::google::protobuf::internal::SCCInfo<0> 
scc_info_DistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto = - {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto}, {}}; +::google::protobuf::internal::SCCInfo<1> scc_info_DistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsDistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto}, { + &scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto.base,}}; void InitDefaults_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto() { ::google::protobuf::internal::InitSCC(&scc_info_DistributedMPITrainingTask_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto.base); @@ -80,19 +82,19 @@ const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fkubeflow_2fmpi PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingTask, worker_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingTask, launcher_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingTask, run_policy_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingTask, success_policy_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, replicas_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, pod_template_name_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, image_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, resources_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec, restart_policy_), }; static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedMPITrainingTask)}, - { 9, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec)}, + { 8, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec)}, }; static ::google::protobuf::Message const * const file_default_instances[] = { @@ -108,35 +110,36 @@ ::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_fl const char descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto[] = "\n#flyteidl/plugins/kubeflow/mpi.proto\022\031f" - "lyteidl.plugins.kubeflow\032&flyteidl/plugi" - "ns/kubeflow/common.proto\"\310\002\n\032Distributed" - "MPITrainingTask\022U\n\017worker_replicas\030\001 \001(\013" - "2<.flyteidl.plugins.kubeflow.Distributed" - "MPITrainingReplicaSpec\022W\n\021launcher_repli" - "cas\030\002 \001(\0132<.flyteidl.plugins.kubeflow.Di" - "stributedMPITrainingReplicaSpec\0228\n\nrun_p" - "olicy\030\003 \001(\0132$.flyteidl.plugins.kubeflow." - "RunPolicy\022@\n\016success_policy\030\004 \001(\0162(.flyt" - "eidl.plugins.kubeflow.SuccessPolicy\"\222\001\n!" 
- "DistributedMPITrainingReplicaSpec\022\020\n\010rep" - "licas\030\001 \001(\005\022\031\n\021pod_template_name\030\002 \001(\t\022@" - "\n\016restart_policy\030\003 \001(\0162(.flyteidl.plugin" - "s.kubeflow.RestartPolicyB9Z7github.com/f" - "lyteorg/flyteidl/gen/pb-go/flyteidl/plug" - "insb\006proto3" + "lyteidl.plugins.kubeflow\032\031flyteidl/core/" + "tasks.proto\032&flyteidl/plugins/kubeflow/c" + "ommon.proto\"\206\002\n\032DistributedMPITrainingTa" + "sk\022U\n\017worker_replicas\030\001 \001(\0132<.flyteidl.p" + "lugins.kubeflow.DistributedMPITrainingRe" + "plicaSpec\022W\n\021launcher_replicas\030\002 \001(\0132<.f" + "lyteidl.plugins.kubeflow.DistributedMPIT" + "rainingReplicaSpec\0228\n\nrun_policy\030\003 \001(\0132$" + ".flyteidl.plugins.kubeflow.RunPolicy\"\263\001\n" + "!DistributedMPITrainingReplicaSpec\022\020\n\010re" + "plicas\030\001 \001(\005\022\r\n\005image\030\002 \001(\t\022+\n\tresources" + "\030\003 \001(\0132\030.flyteidl.core.Resources\022@\n\016rest" + "art_policy\030\004 \001(\0162(.flyteidl.plugins.kube" + "flow.RestartPolicyB9Z7github.com/flyteor" + "g/flyteidl/gen/pb-go/flyteidl/pluginsb\006p" + "roto3" ; ::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto = { false, InitDefaults_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, - "flyteidl/plugins/kubeflow/mpi.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, 651, + "flyteidl/plugins/kubeflow/mpi.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, 645, }; void AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto() { - static constexpr ::google::protobuf::internal::InitFunc deps[1] = + static constexpr ::google::protobuf::internal::InitFunc deps[2] = { + ::AddDescriptors_flyteidl_2fcore_2ftasks_2eproto, ::AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, }; - ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, deps, 1); + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto, deps, 2); } // Force running AddDescriptors() at dynamic initialization time. 
@@ -184,7 +187,6 @@ void DistributedMPITrainingTask::clear_run_policy() { const int DistributedMPITrainingTask::kWorkerReplicasFieldNumber; const int DistributedMPITrainingTask::kLauncherReplicasFieldNumber; const int DistributedMPITrainingTask::kRunPolicyFieldNumber; -const int DistributedMPITrainingTask::kSuccessPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 DistributedMPITrainingTask::DistributedMPITrainingTask() @@ -211,7 +213,6 @@ DistributedMPITrainingTask::DistributedMPITrainingTask(const DistributedMPITrain } else { run_policy_ = nullptr; } - success_policy_ = from.success_policy_; // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.kubeflow.DistributedMPITrainingTask) } @@ -219,8 +220,8 @@ void DistributedMPITrainingTask::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedMPITrainingTask_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto.base); ::memset(&worker_replicas_, 0, static_cast( - reinterpret_cast(&success_policy_) - - reinterpret_cast(&worker_replicas_)) + sizeof(success_policy_)); + reinterpret_cast(&run_policy_) - + reinterpret_cast(&worker_replicas_)) + sizeof(run_policy_)); } DistributedMPITrainingTask::~DistributedMPITrainingTask() { @@ -261,7 +262,6 @@ void DistributedMPITrainingTask::Clear() { delete run_policy_; } run_policy_ = nullptr; - success_policy_ = 0; _internal_metadata_.Clear(); } @@ -317,14 +317,6 @@ const char* DistributedMPITrainingTask::_InternalParse(const char* begin, const {parser_till_end, object}, ptr - size, ptr)); break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - case 4: { - if (static_cast<::google::protobuf::uint8>(tag) != 32) goto handle_unusual; - ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); - msg->set_success_policy(static_cast<::flyteidl::plugins::kubeflow::SuccessPolicy>(val)); - GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - break; - } default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -388,20 +380,6 @@ bool DistributedMPITrainingTask::MergePartialFromCodedStream( break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - case 4: { - if (static_cast< ::google::protobuf::uint8>(tag) == (32 & 0xFF)) { - int value = 0; - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( - input, &value))); - set_success_policy(static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(value)); - } else { - goto handle_unusual; - } - break; - } - default: { handle_unusual: if (tag == 0) { @@ -447,12 +425,6 @@ void DistributedMPITrainingTask::SerializeWithCachedSizes( 3, HasBitSetters::run_policy(this), output); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - ::google::protobuf::internal::WireFormatLite::WriteEnum( - 4, this->success_policy(), output); - } - if (_internal_metadata_.have_unknown_fields()) { ::google::protobuf::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -487,12 +459,6 @@ ::google::protobuf::uint8* DistributedMPITrainingTask::InternalSerializeWithCach 3, HasBitSetters::run_policy(this), target); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 4, this->success_policy(), target); - } - if (_internal_metadata_.have_unknown_fields()) { target = 
::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -535,12 +501,6 @@ size_t DistributedMPITrainingTask::ByteSizeLong() const { *run_policy_); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::EnumSize(this->success_policy()); - } - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -577,9 +537,6 @@ void DistributedMPITrainingTask::MergeFrom(const DistributedMPITrainingTask& fro if (from.has_run_policy()) { mutable_run_policy()->::flyteidl::plugins::kubeflow::RunPolicy::MergeFrom(from.run_policy()); } - if (from.success_policy() != 0) { - set_success_policy(from.success_policy()); - } } void DistributedMPITrainingTask::CopyFrom(const ::google::protobuf::Message& from) { @@ -610,7 +567,6 @@ void DistributedMPITrainingTask::InternalSwap(DistributedMPITrainingTask* other) swap(worker_replicas_, other->worker_replicas_); swap(launcher_replicas_, other->launcher_replicas_); swap(run_policy_, other->run_policy_); - swap(success_policy_, other->success_policy_); } ::google::protobuf::Metadata DistributedMPITrainingTask::GetMetadata() const { @@ -622,14 +578,28 @@ ::google::protobuf::Metadata DistributedMPITrainingTask::GetMetadata() const { // =================================================================== void DistributedMPITrainingReplicaSpec::InitAsDefaultInstance() { + ::flyteidl::plugins::kubeflow::_DistributedMPITrainingReplicaSpec_default_instance_._instance.get_mutable()->resources_ = const_cast< ::flyteidl::core::Resources*>( + ::flyteidl::core::Resources::internal_default_instance()); } class DistributedMPITrainingReplicaSpec::HasBitSetters { public: + static const ::flyteidl::core::Resources& resources(const DistributedMPITrainingReplicaSpec* msg); }; +const ::flyteidl::core::Resources& +DistributedMPITrainingReplicaSpec::HasBitSetters::resources(const DistributedMPITrainingReplicaSpec* msg) { + return *msg->resources_; +} +void DistributedMPITrainingReplicaSpec::clear_resources() { + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; +} #if !defined(_MSC_VER) || _MSC_VER >= 1900 const int DistributedMPITrainingReplicaSpec::kReplicasFieldNumber; -const int DistributedMPITrainingReplicaSpec::kPodTemplateNameFieldNumber; +const int DistributedMPITrainingReplicaSpec::kImageFieldNumber; +const int DistributedMPITrainingReplicaSpec::kResourcesFieldNumber; const int DistributedMPITrainingReplicaSpec::kRestartPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 @@ -642,9 +612,14 @@ DistributedMPITrainingReplicaSpec::DistributedMPITrainingReplicaSpec(const Distr : ::google::protobuf::Message(), _internal_metadata_(nullptr) { _internal_metadata_.MergeFrom(from._internal_metadata_); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (from.pod_template_name().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (from.image().size() > 0) { + image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if (from.has_resources()) { + resources_ = new 
::flyteidl::core::Resources(*from.resources_); + } else { + resources_ = nullptr; } ::memcpy(&replicas_, &from.replicas_, static_cast(reinterpret_cast(&restart_policy_) - @@ -655,10 +630,10 @@ DistributedMPITrainingReplicaSpec::DistributedMPITrainingReplicaSpec(const Distr void DistributedMPITrainingReplicaSpec::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedMPITrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto.base); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - ::memset(&replicas_, 0, static_cast( + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::memset(&resources_, 0, static_cast( reinterpret_cast(&restart_policy_) - - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); + reinterpret_cast(&resources_)) + sizeof(restart_policy_)); } DistributedMPITrainingReplicaSpec::~DistributedMPITrainingReplicaSpec() { @@ -667,7 +642,8 @@ DistributedMPITrainingReplicaSpec::~DistributedMPITrainingReplicaSpec() { } void DistributedMPITrainingReplicaSpec::SharedDtor() { - pod_template_name_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (this != internal_default_instance()) delete resources_; } void DistributedMPITrainingReplicaSpec::SetCachedSize(int size) const { @@ -685,7 +661,11 @@ void DistributedMPITrainingReplicaSpec::Clear() { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; ::memset(&replicas_, 0, static_cast( reinterpret_cast(&restart_policy_) - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); @@ -712,13 +692,13 @@ const char* DistributedMPITrainingReplicaSpec::_InternalParse(const char* begin, GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast<::google::protobuf::uint8>(tag) != 18) goto handle_unusual; ptr = ::google::protobuf::io::ReadSize(ptr, &size); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name"); - object = msg->mutable_pod_template_name(); + ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image"); + object = msg->mutable_image(); if (size > end - ptr + ::google::protobuf::internal::ParseContext::kSlopBytes) { parser_till_end = ::google::protobuf::internal::GreedyStringParserUTF8; goto string_till_end; @@ -728,9 +708,22 @@ const char* DistributedMPITrainingReplicaSpec::_InternalParse(const char* begin, ptr += size; break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast<::google::protobuf::uint8>(tag) != 24) goto handle_unusual; + if (static_cast<::google::protobuf::uint8>(tag) != 26) goto handle_unusual; + ptr = ::google::protobuf::io::ReadSize(ptr, &size); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + parser_till_end = ::flyteidl::core::Resources::_InternalParse; + object = msg->mutable_resources(); + if (size > end - ptr) goto len_delim_till_end; + ptr 
+= size; + GOOGLE_PROTOBUF_PARSER_ASSERT(ctx->ParseExactRange( + {parser_till_end, object}, ptr - size, ptr)); + break; + } + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast<::google::protobuf::uint8>(tag) != 32) goto handle_unusual; ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); msg->set_restart_policy(static_cast<::flyteidl::plugins::kubeflow::RestartPolicy>(val)); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); @@ -783,24 +776,35 @@ bool DistributedMPITrainingReplicaSpec::MergePartialFromCodedStream( break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast< ::google::protobuf::uint8>(tag) == (18 & 0xFF)) { DO_(::google::protobuf::internal::WireFormatLite::ReadString( - input, this->mutable_pod_template_name())); + input, this->mutable_image())); DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::PARSE, - "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name")); + "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image")); } else { goto handle_unusual; } break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast< ::google::protobuf::uint8>(tag) == (24 & 0xFF)) { + if (static_cast< ::google::protobuf::uint8>(tag) == (26 & 0xFF)) { + DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( + input, mutable_resources())); + } else { + goto handle_unusual; + } + break; + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast< ::google::protobuf::uint8>(tag) == (32 & 0xFF)) { int value = 0; DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( @@ -844,20 +848,26 @@ void DistributedMPITrainingReplicaSpec::SerializeWithCachedSizes( ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->replicas(), output); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image"); ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( - 2, this->pod_template_name(), output); + 2, this->image(), output); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, HasBitSetters::resources(this), output); + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { ::google::protobuf::internal::WireFormatLite::WriteEnum( - 3, this->restart_policy(), output); + 4, this->restart_policy(), output); } if (_internal_metadata_.have_unknown_fields()) { @@ -878,21 +888,28 @@ ::google::protobuf::uint8* 
DistributedMPITrainingReplicaSpec::InternalSerializeW target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->replicas(), target); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image"); target = ::google::protobuf::internal::WireFormatLite::WriteStringToArray( - 2, this->pod_template_name(), target); + 2, this->image(), target); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + target = ::google::protobuf::internal::WireFormatLite:: + InternalWriteMessageToArray( + 3, HasBitSetters::resources(this), target); + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 3, this->restart_policy(), target); + 4, this->restart_policy(), target); } if (_internal_metadata_.have_unknown_fields()) { @@ -916,11 +933,18 @@ size_t DistributedMPITrainingReplicaSpec::ByteSizeLong() const { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::StringSize( - this->pod_template_name()); + this->image()); + } + + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSize( + *resources_); } // int32 replicas = 1; @@ -930,7 +954,7 @@ size_t DistributedMPITrainingReplicaSpec::ByteSizeLong() const { this->replicas()); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::EnumSize(this->restart_policy()); @@ -963,9 +987,12 @@ void DistributedMPITrainingReplicaSpec::MergeFrom(const DistributedMPITrainingRe ::google::protobuf::uint32 cached_has_bits = 0; (void) cached_has_bits; - if (from.pod_template_name().size() > 0) { + if (from.image().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if (from.has_resources()) { + mutable_resources()->::flyteidl::core::Resources::MergeFrom(from.resources()); } if (from.replicas() != 0) { set_replicas(from.replicas()); @@ -1000,8 +1027,9 @@ void DistributedMPITrainingReplicaSpec::Swap(DistributedMPITrainingReplicaSpec* void DistributedMPITrainingReplicaSpec::InternalSwap(DistributedMPITrainingReplicaSpec* other) { using std::swap; _internal_metadata_.Swap(&other->_internal_metadata_); - pod_template_name_.Swap(&other->pod_template_name_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.Swap(&other->image_, 
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + swap(resources_, other->resources_); swap(replicas_, other->replicas_); swap(restart_policy_, other->restart_policy_); } diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.h b/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.h index 96a1ab6a1..13fb83ef3 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.h +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/mpi.pb.h @@ -31,6 +31,7 @@ #include // IWYU pragma: export #include // IWYU pragma: export #include +#include "flyteidl/core/tasks.pb.h" #include "flyteidl/plugins/kubeflow/common.pb.h" // @@protoc_insertion_point(includes) #include @@ -195,12 +196,6 @@ class DistributedMPITrainingTask final : ::flyteidl::plugins::kubeflow::RunPolicy* mutable_run_policy(); void set_allocated_run_policy(::flyteidl::plugins::kubeflow::RunPolicy* run_policy); - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - void clear_success_policy(); - static const int kSuccessPolicyFieldNumber = 4; - ::flyteidl::plugins::kubeflow::SuccessPolicy success_policy() const; - void set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value); - // @@protoc_insertion_point(class_scope:flyteidl.plugins.kubeflow.DistributedMPITrainingTask) private: class HasBitSetters; @@ -209,7 +204,6 @@ class DistributedMPITrainingTask final : ::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec* worker_replicas_; ::flyteidl::plugins::kubeflow::DistributedMPITrainingReplicaSpec* launcher_replicas_; ::flyteidl::plugins::kubeflow::RunPolicy* run_policy_; - int success_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; friend struct ::TableStruct_flyteidl_2fplugins_2fkubeflow_2fmpi_2eproto; }; @@ -310,19 +304,28 @@ class DistributedMPITrainingReplicaSpec final : // accessors ------------------------------------------------------- - // string pod_template_name = 2; - void clear_pod_template_name(); - static const int kPodTemplateNameFieldNumber = 2; - const ::std::string& pod_template_name() const; - void set_pod_template_name(const ::std::string& value); + // string image = 2; + void clear_image(); + static const int kImageFieldNumber = 2; + const ::std::string& image() const; + void set_image(const ::std::string& value); #if LANG_CXX11 - void set_pod_template_name(::std::string&& value); + void set_image(::std::string&& value); #endif - void set_pod_template_name(const char* value); - void set_pod_template_name(const char* value, size_t size); - ::std::string* mutable_pod_template_name(); - ::std::string* release_pod_template_name(); - void set_allocated_pod_template_name(::std::string* pod_template_name); + void set_image(const char* value); + void set_image(const char* value, size_t size); + ::std::string* mutable_image(); + ::std::string* release_image(); + void set_allocated_image(::std::string* image); + + // .flyteidl.core.Resources resources = 3; + bool has_resources() const; + void clear_resources(); + static const int kResourcesFieldNumber = 3; + const ::flyteidl::core::Resources& resources() const; + ::flyteidl::core::Resources* release_resources(); + ::flyteidl::core::Resources* mutable_resources(); + void set_allocated_resources(::flyteidl::core::Resources* resources); // int32 replicas = 1; void clear_replicas(); @@ -330,9 +333,9 @@ class DistributedMPITrainingReplicaSpec final : ::google::protobuf::int32 replicas() const; void set_replicas(::google::protobuf::int32 value); - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + 
// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; void clear_restart_policy(); - static const int kRestartPolicyFieldNumber = 3; + static const int kRestartPolicyFieldNumber = 4; ::flyteidl::plugins::kubeflow::RestartPolicy restart_policy() const; void set_restart_policy(::flyteidl::plugins::kubeflow::RestartPolicy value); @@ -341,7 +344,8 @@ class DistributedMPITrainingReplicaSpec final : class HasBitSetters; ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::ArenaStringPtr pod_template_name_; + ::google::protobuf::internal::ArenaStringPtr image_; + ::flyteidl::core::Resources* resources_; ::google::protobuf::int32 replicas_; int restart_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; @@ -505,20 +509,6 @@ inline void DistributedMPITrainingTask::set_allocated_run_policy(::flyteidl::plu // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedMPITrainingTask.run_policy) } -// .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; -inline void DistributedMPITrainingTask::clear_success_policy() { - success_policy_ = 0; -} -inline ::flyteidl::plugins::kubeflow::SuccessPolicy DistributedMPITrainingTask::success_policy() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedMPITrainingTask.success_policy) - return static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(success_policy_); -} -inline void DistributedMPITrainingTask::set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value) { - - success_policy_ = value; - // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedMPITrainingTask.success_policy) -} - // ------------------------------------------------------------------- // DistributedMPITrainingReplicaSpec @@ -537,60 +527,105 @@ inline void DistributedMPITrainingReplicaSpec::set_replicas(::google::protobuf:: // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.replicas) } -// string pod_template_name = 2; -inline void DistributedMPITrainingReplicaSpec::clear_pod_template_name() { - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +// string image = 2; +inline void DistributedMPITrainingReplicaSpec::clear_image() { + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); } -inline const ::std::string& DistributedMPITrainingReplicaSpec::pod_template_name() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) - return pod_template_name_.GetNoArena(); +inline const ::std::string& DistributedMPITrainingReplicaSpec::image() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) + return image_.GetNoArena(); } -inline void DistributedMPITrainingReplicaSpec::set_pod_template_name(const ::std::string& value) { +inline void DistributedMPITrainingReplicaSpec::set_image(const ::std::string& value) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) } #if LANG_CXX11 -inline void 
DistributedMPITrainingReplicaSpec::set_pod_template_name(::std::string&& value) { +inline void DistributedMPITrainingReplicaSpec::set_image(::std::string&& value) { - pod_template_name_.SetNoArena( + image_.SetNoArena( &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) } #endif -inline void DistributedMPITrainingReplicaSpec::set_pod_template_name(const char* value) { +inline void DistributedMPITrainingReplicaSpec::set_image(const char* value) { GOOGLE_DCHECK(value != nullptr); - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) } -inline void DistributedMPITrainingReplicaSpec::set_pod_template_name(const char* value, size_t size) { +inline void DistributedMPITrainingReplicaSpec::set_image(const char* value, size_t size) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) +} +inline ::std::string* DistributedMPITrainingReplicaSpec::mutable_image() { + + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) + return image_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline ::std::string* DistributedMPITrainingReplicaSpec::release_image() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) + + return image_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline void DistributedMPITrainingReplicaSpec::set_allocated_image(::std::string* image) { + if (image != nullptr) { + + } else { + + } + image_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), image); + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.image) +} + +// .flyteidl.core.Resources resources = 3; +inline bool DistributedMPITrainingReplicaSpec::has_resources() const { + return this != internal_default_instance() && resources_ != nullptr; } -inline ::std::string* DistributedMPITrainingReplicaSpec::mutable_pod_template_name() { +inline const ::flyteidl::core::Resources& DistributedMPITrainingReplicaSpec::resources() const { + const ::flyteidl::core::Resources* p = resources_; + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.resources) + return p != nullptr ? 
*p : *reinterpret_cast( + &::flyteidl::core::_Resources_default_instance_); +} +inline ::flyteidl::core::Resources* DistributedMPITrainingReplicaSpec::release_resources() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.resources) - // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) - return pod_template_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::flyteidl::core::Resources* temp = resources_; + resources_ = nullptr; + return temp; } -inline ::std::string* DistributedMPITrainingReplicaSpec::release_pod_template_name() { - // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) +inline ::flyteidl::core::Resources* DistributedMPITrainingReplicaSpec::mutable_resources() { - return pod_template_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (resources_ == nullptr) { + auto* p = CreateMaybeMessage<::flyteidl::core::Resources>(GetArenaNoVirtual()); + resources_ = p; + } + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.resources) + return resources_; } -inline void DistributedMPITrainingReplicaSpec::set_allocated_pod_template_name(::std::string* pod_template_name) { - if (pod_template_name != nullptr) { +inline void DistributedMPITrainingReplicaSpec::set_allocated_resources(::flyteidl::core::Resources* resources) { + ::google::protobuf::Arena* message_arena = GetArenaNoVirtual(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::google::protobuf::MessageLite*>(resources_); + } + if (resources) { + ::google::protobuf::Arena* submessage_arena = nullptr; + if (message_arena != submessage_arena) { + resources = ::google::protobuf::internal::GetOwnedMessage( + message_arena, resources, submessage_arena); + } } else { } - pod_template_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), pod_template_name); - // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.pod_template_name) + resources_ = resources; + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpec.resources) } -// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; +// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; inline void DistributedMPITrainingReplicaSpec::clear_restart_policy() { restart_policy_ = 0; } diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.cc b/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.cc index 8ead16062..d81eb072b 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.cc +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.cc @@ -16,8 +16,9 @@ // @@protoc_insertion_point(includes) #include +extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fcore_2ftasks_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto; extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_RunPolicy_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto; -extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_DistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto; +extern 
PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_DistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto; namespace flyteidl { namespace plugins { namespace kubeflow { @@ -59,8 +60,9 @@ static void InitDefaultsDistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins ::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec::InitAsDefaultInstance(); } -::google::protobuf::internal::SCCInfo<0> scc_info_DistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto = - {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto}, {}}; +::google::protobuf::internal::SCCInfo<1> scc_info_DistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsDistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto}, { + &scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto.base,}}; void InitDefaults_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto() { ::google::protobuf::internal::InitSCC(&scc_info_DistributedPyTorchTrainingTask_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto.base); @@ -80,19 +82,19 @@ const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fkubeflow_2fpyt PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingTask, worker_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingTask, master_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingTask, run_policy_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingTask, success_policy_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, replicas_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, pod_template_name_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, image_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, resources_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec, restart_policy_), }; static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingTask)}, - { 9, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec)}, + { 8, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec)}, }; static ::google::protobuf::Message const * const file_default_instances[] = { @@ -108,35 +110,36 @@ ::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_fl const char descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto[] = "\n\'flyteidl/plugins/kubeflow/pytorch.prot" - "o\022\031flyteidl.plugins.kubeflow\032&flyteidl/p" - "lugins/kubeflow/common.proto\"\322\002\n\036Distrib" - "utedPyTorchTrainingTask\022Y\n\017worker_replic" - "as\030\001 
\001(\0132@.flyteidl.plugins.kubeflow.Dis" - "tributedPyTorchTrainingReplicaSpec\022Y\n\017ma" - "ster_replicas\030\002 \001(\0132@.flyteidl.plugins.k" - "ubeflow.DistributedPyTorchTrainingReplic" - "aSpec\0228\n\nrun_policy\030\003 \001(\0132$.flyteidl.plu" - "gins.kubeflow.RunPolicy\022@\n\016success_polic" - "y\030\004 \001(\0162(.flyteidl.plugins.kubeflow.Succ" - "essPolicy\"\226\001\n%DistributedPyTorchTraining" - "ReplicaSpec\022\020\n\010replicas\030\001 \001(\005\022\031\n\021pod_tem" - "plate_name\030\002 \001(\t\022@\n\016restart_policy\030\003 \001(\016" - "2(.flyteidl.plugins.kubeflow.RestartPoli" - "cyB9Z7github.com/flyteorg/flyteidl/gen/p" - "b-go/flyteidl/pluginsb\006proto3" + "o\022\031flyteidl.plugins.kubeflow\032\031flyteidl/c" + "ore/tasks.proto\032&flyteidl/plugins/kubefl" + "ow/common.proto\"\220\002\n\036DistributedPyTorchTr" + "ainingTask\022Y\n\017worker_replicas\030\001 \001(\0132@.fl" + "yteidl.plugins.kubeflow.DistributedPyTor" + "chTrainingReplicaSpec\022Y\n\017master_replicas" + "\030\002 \001(\0132@.flyteidl.plugins.kubeflow.Distr" + "ibutedPyTorchTrainingReplicaSpec\0228\n\nrun_" + "policy\030\003 \001(\0132$.flyteidl.plugins.kubeflow" + ".RunPolicy\"\267\001\n%DistributedPyTorchTrainin" + "gReplicaSpec\022\020\n\010replicas\030\001 \001(\005\022\r\n\005image\030" + "\002 \001(\t\022+\n\tresources\030\003 \001(\0132\030.flyteidl.core" + ".Resources\022@\n\016restart_policy\030\004 \001(\0162(.fly" + "teidl.plugins.kubeflow.RestartPolicyB9Z7" + "github.com/flyteorg/flyteidl/gen/pb-go/f" + "lyteidl/pluginsb\006proto3" ; ::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto = { false, InitDefaults_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, - "flyteidl/plugins/kubeflow/pytorch.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, 669, + "flyteidl/plugins/kubeflow/pytorch.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, 663, }; void AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto() { - static constexpr ::google::protobuf::internal::InitFunc deps[1] = + static constexpr ::google::protobuf::internal::InitFunc deps[2] = { + ::AddDescriptors_flyteidl_2fcore_2ftasks_2eproto, ::AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, }; - ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, deps, 1); + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto, deps, 2); } // Force running AddDescriptors() at dynamic initialization time. 
@@ -184,7 +187,6 @@ void DistributedPyTorchTrainingTask::clear_run_policy() { const int DistributedPyTorchTrainingTask::kWorkerReplicasFieldNumber; const int DistributedPyTorchTrainingTask::kMasterReplicasFieldNumber; const int DistributedPyTorchTrainingTask::kRunPolicyFieldNumber; -const int DistributedPyTorchTrainingTask::kSuccessPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 DistributedPyTorchTrainingTask::DistributedPyTorchTrainingTask() @@ -211,7 +213,6 @@ DistributedPyTorchTrainingTask::DistributedPyTorchTrainingTask(const Distributed } else { run_policy_ = nullptr; } - success_policy_ = from.success_policy_; // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingTask) } @@ -219,8 +220,8 @@ void DistributedPyTorchTrainingTask::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedPyTorchTrainingTask_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto.base); ::memset(&worker_replicas_, 0, static_cast( - reinterpret_cast(&success_policy_) - - reinterpret_cast(&worker_replicas_)) + sizeof(success_policy_)); + reinterpret_cast(&run_policy_) - + reinterpret_cast(&worker_replicas_)) + sizeof(run_policy_)); } DistributedPyTorchTrainingTask::~DistributedPyTorchTrainingTask() { @@ -261,7 +262,6 @@ void DistributedPyTorchTrainingTask::Clear() { delete run_policy_; } run_policy_ = nullptr; - success_policy_ = 0; _internal_metadata_.Clear(); } @@ -317,14 +317,6 @@ const char* DistributedPyTorchTrainingTask::_InternalParse(const char* begin, co {parser_till_end, object}, ptr - size, ptr)); break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - case 4: { - if (static_cast<::google::protobuf::uint8>(tag) != 32) goto handle_unusual; - ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); - msg->set_success_policy(static_cast<::flyteidl::plugins::kubeflow::SuccessPolicy>(val)); - GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - break; - } default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -388,20 +380,6 @@ bool DistributedPyTorchTrainingTask::MergePartialFromCodedStream( break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - case 4: { - if (static_cast< ::google::protobuf::uint8>(tag) == (32 & 0xFF)) { - int value = 0; - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( - input, &value))); - set_success_policy(static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(value)); - } else { - goto handle_unusual; - } - break; - } - default: { handle_unusual: if (tag == 0) { @@ -447,12 +425,6 @@ void DistributedPyTorchTrainingTask::SerializeWithCachedSizes( 3, HasBitSetters::run_policy(this), output); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - ::google::protobuf::internal::WireFormatLite::WriteEnum( - 4, this->success_policy(), output); - } - if (_internal_metadata_.have_unknown_fields()) { ::google::protobuf::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -487,12 +459,6 @@ ::google::protobuf::uint8* DistributedPyTorchTrainingTask::InternalSerializeWith 3, HasBitSetters::run_policy(this), target); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 4, this->success_policy(), target); - } - if (_internal_metadata_.have_unknown_fields()) 
{ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -535,12 +501,6 @@ size_t DistributedPyTorchTrainingTask::ByteSizeLong() const { *run_policy_); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - if (this->success_policy() != 0) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::EnumSize(this->success_policy()); - } - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -577,9 +537,6 @@ void DistributedPyTorchTrainingTask::MergeFrom(const DistributedPyTorchTrainingT if (from.has_run_policy()) { mutable_run_policy()->::flyteidl::plugins::kubeflow::RunPolicy::MergeFrom(from.run_policy()); } - if (from.success_policy() != 0) { - set_success_policy(from.success_policy()); - } } void DistributedPyTorchTrainingTask::CopyFrom(const ::google::protobuf::Message& from) { @@ -610,7 +567,6 @@ void DistributedPyTorchTrainingTask::InternalSwap(DistributedPyTorchTrainingTask swap(worker_replicas_, other->worker_replicas_); swap(master_replicas_, other->master_replicas_); swap(run_policy_, other->run_policy_); - swap(success_policy_, other->success_policy_); } ::google::protobuf::Metadata DistributedPyTorchTrainingTask::GetMetadata() const { @@ -622,14 +578,28 @@ ::google::protobuf::Metadata DistributedPyTorchTrainingTask::GetMetadata() const // =================================================================== void DistributedPyTorchTrainingReplicaSpec::InitAsDefaultInstance() { + ::flyteidl::plugins::kubeflow::_DistributedPyTorchTrainingReplicaSpec_default_instance_._instance.get_mutable()->resources_ = const_cast< ::flyteidl::core::Resources*>( + ::flyteidl::core::Resources::internal_default_instance()); } class DistributedPyTorchTrainingReplicaSpec::HasBitSetters { public: + static const ::flyteidl::core::Resources& resources(const DistributedPyTorchTrainingReplicaSpec* msg); }; +const ::flyteidl::core::Resources& +DistributedPyTorchTrainingReplicaSpec::HasBitSetters::resources(const DistributedPyTorchTrainingReplicaSpec* msg) { + return *msg->resources_; +} +void DistributedPyTorchTrainingReplicaSpec::clear_resources() { + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; +} #if !defined(_MSC_VER) || _MSC_VER >= 1900 const int DistributedPyTorchTrainingReplicaSpec::kReplicasFieldNumber; -const int DistributedPyTorchTrainingReplicaSpec::kPodTemplateNameFieldNumber; +const int DistributedPyTorchTrainingReplicaSpec::kImageFieldNumber; +const int DistributedPyTorchTrainingReplicaSpec::kResourcesFieldNumber; const int DistributedPyTorchTrainingReplicaSpec::kRestartPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 @@ -642,9 +612,14 @@ DistributedPyTorchTrainingReplicaSpec::DistributedPyTorchTrainingReplicaSpec(con : ::google::protobuf::Message(), _internal_metadata_(nullptr) { _internal_metadata_.MergeFrom(from._internal_metadata_); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (from.pod_template_name().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (from.image().size() > 0) { + image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if 
(from.has_resources()) { + resources_ = new ::flyteidl::core::Resources(*from.resources_); + } else { + resources_ = nullptr; } ::memcpy(&replicas_, &from.replicas_, static_cast(reinterpret_cast(&restart_policy_) - @@ -655,10 +630,10 @@ DistributedPyTorchTrainingReplicaSpec::DistributedPyTorchTrainingReplicaSpec(con void DistributedPyTorchTrainingReplicaSpec::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedPyTorchTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto.base); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - ::memset(&replicas_, 0, static_cast( + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::memset(&resources_, 0, static_cast( reinterpret_cast(&restart_policy_) - - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); + reinterpret_cast(&resources_)) + sizeof(restart_policy_)); } DistributedPyTorchTrainingReplicaSpec::~DistributedPyTorchTrainingReplicaSpec() { @@ -667,7 +642,8 @@ DistributedPyTorchTrainingReplicaSpec::~DistributedPyTorchTrainingReplicaSpec() } void DistributedPyTorchTrainingReplicaSpec::SharedDtor() { - pod_template_name_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (this != internal_default_instance()) delete resources_; } void DistributedPyTorchTrainingReplicaSpec::SetCachedSize(int size) const { @@ -685,7 +661,11 @@ void DistributedPyTorchTrainingReplicaSpec::Clear() { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; ::memset(&replicas_, 0, static_cast( reinterpret_cast(&restart_policy_) - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); @@ -712,13 +692,13 @@ const char* DistributedPyTorchTrainingReplicaSpec::_InternalParse(const char* be GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast<::google::protobuf::uint8>(tag) != 18) goto handle_unusual; ptr = ::google::protobuf::io::ReadSize(ptr, &size); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name"); - object = msg->mutable_pod_template_name(); + ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image"); + object = msg->mutable_image(); if (size > end - ptr + ::google::protobuf::internal::ParseContext::kSlopBytes) { parser_till_end = ::google::protobuf::internal::GreedyStringParserUTF8; goto string_till_end; @@ -728,9 +708,22 @@ const char* DistributedPyTorchTrainingReplicaSpec::_InternalParse(const char* be ptr += size; break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast<::google::protobuf::uint8>(tag) != 24) goto handle_unusual; + if (static_cast<::google::protobuf::uint8>(tag) != 26) goto handle_unusual; + ptr = ::google::protobuf::io::ReadSize(ptr, &size); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + parser_till_end = ::flyteidl::core::Resources::_InternalParse; + 
object = msg->mutable_resources(); + if (size > end - ptr) goto len_delim_till_end; + ptr += size; + GOOGLE_PROTOBUF_PARSER_ASSERT(ctx->ParseExactRange( + {parser_till_end, object}, ptr - size, ptr)); + break; + } + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast<::google::protobuf::uint8>(tag) != 32) goto handle_unusual; ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); msg->set_restart_policy(static_cast<::flyteidl::plugins::kubeflow::RestartPolicy>(val)); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); @@ -783,24 +776,35 @@ bool DistributedPyTorchTrainingReplicaSpec::MergePartialFromCodedStream( break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast< ::google::protobuf::uint8>(tag) == (18 & 0xFF)) { DO_(::google::protobuf::internal::WireFormatLite::ReadString( - input, this->mutable_pod_template_name())); + input, this->mutable_image())); DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::PARSE, - "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name")); + "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image")); } else { goto handle_unusual; } break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast< ::google::protobuf::uint8>(tag) == (24 & 0xFF)) { + if (static_cast< ::google::protobuf::uint8>(tag) == (26 & 0xFF)) { + DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( + input, mutable_resources())); + } else { + goto handle_unusual; + } + break; + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast< ::google::protobuf::uint8>(tag) == (32 & 0xFF)) { int value = 0; DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( @@ -844,20 +848,26 @@ void DistributedPyTorchTrainingReplicaSpec::SerializeWithCachedSizes( ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->replicas(), output); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image"); ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( - 2, this->pod_template_name(), output); + 2, this->image(), output); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, HasBitSetters::resources(this), output); + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { ::google::protobuf::internal::WireFormatLite::WriteEnum( - 3, this->restart_policy(), output); + 4, this->restart_policy(), output); } if 
(_internal_metadata_.have_unknown_fields()) { @@ -878,21 +888,28 @@ ::google::protobuf::uint8* DistributedPyTorchTrainingReplicaSpec::InternalSerial target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->replicas(), target); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image"); target = ::google::protobuf::internal::WireFormatLite::WriteStringToArray( - 2, this->pod_template_name(), target); + 2, this->image(), target); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + target = ::google::protobuf::internal::WireFormatLite:: + InternalWriteMessageToArray( + 3, HasBitSetters::resources(this), target); + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 3, this->restart_policy(), target); + 4, this->restart_policy(), target); } if (_internal_metadata_.have_unknown_fields()) { @@ -916,11 +933,18 @@ size_t DistributedPyTorchTrainingReplicaSpec::ByteSizeLong() const { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::StringSize( - this->pod_template_name()); + this->image()); + } + + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSize( + *resources_); } // int32 replicas = 1; @@ -930,7 +954,7 @@ size_t DistributedPyTorchTrainingReplicaSpec::ByteSizeLong() const { this->replicas()); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::EnumSize(this->restart_policy()); @@ -963,9 +987,12 @@ void DistributedPyTorchTrainingReplicaSpec::MergeFrom(const DistributedPyTorchTr ::google::protobuf::uint32 cached_has_bits = 0; (void) cached_has_bits; - if (from.pod_template_name().size() > 0) { + if (from.image().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if (from.has_resources()) { + mutable_resources()->::flyteidl::core::Resources::MergeFrom(from.resources()); } if (from.replicas() != 0) { set_replicas(from.replicas()); @@ -1000,8 +1027,9 @@ void DistributedPyTorchTrainingReplicaSpec::Swap(DistributedPyTorchTrainingRepli void DistributedPyTorchTrainingReplicaSpec::InternalSwap(DistributedPyTorchTrainingReplicaSpec* other) { using std::swap; _internal_metadata_.Swap(&other->_internal_metadata_); - 
pod_template_name_.Swap(&other->pod_template_name_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.Swap(&other->image_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + swap(resources_, other->resources_); swap(replicas_, other->replicas_); swap(restart_policy_, other->restart_policy_); } diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.h b/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.h index 702c19079..cbb3a3751 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.h +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/pytorch.pb.h @@ -31,6 +31,7 @@ #include // IWYU pragma: export #include // IWYU pragma: export #include +#include "flyteidl/core/tasks.pb.h" #include "flyteidl/plugins/kubeflow/common.pb.h" // @@protoc_insertion_point(includes) #include @@ -195,12 +196,6 @@ class DistributedPyTorchTrainingTask final : ::flyteidl::plugins::kubeflow::RunPolicy* mutable_run_policy(); void set_allocated_run_policy(::flyteidl::plugins::kubeflow::RunPolicy* run_policy); - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - void clear_success_policy(); - static const int kSuccessPolicyFieldNumber = 4; - ::flyteidl::plugins::kubeflow::SuccessPolicy success_policy() const; - void set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value); - // @@protoc_insertion_point(class_scope:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingTask) private: class HasBitSetters; @@ -209,7 +204,6 @@ class DistributedPyTorchTrainingTask final : ::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec* worker_replicas_; ::flyteidl::plugins::kubeflow::DistributedPyTorchTrainingReplicaSpec* master_replicas_; ::flyteidl::plugins::kubeflow::RunPolicy* run_policy_; - int success_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; friend struct ::TableStruct_flyteidl_2fplugins_2fkubeflow_2fpytorch_2eproto; }; @@ -310,19 +304,28 @@ class DistributedPyTorchTrainingReplicaSpec final : // accessors ------------------------------------------------------- - // string pod_template_name = 2; - void clear_pod_template_name(); - static const int kPodTemplateNameFieldNumber = 2; - const ::std::string& pod_template_name() const; - void set_pod_template_name(const ::std::string& value); + // string image = 2; + void clear_image(); + static const int kImageFieldNumber = 2; + const ::std::string& image() const; + void set_image(const ::std::string& value); #if LANG_CXX11 - void set_pod_template_name(::std::string&& value); + void set_image(::std::string&& value); #endif - void set_pod_template_name(const char* value); - void set_pod_template_name(const char* value, size_t size); - ::std::string* mutable_pod_template_name(); - ::std::string* release_pod_template_name(); - void set_allocated_pod_template_name(::std::string* pod_template_name); + void set_image(const char* value); + void set_image(const char* value, size_t size); + ::std::string* mutable_image(); + ::std::string* release_image(); + void set_allocated_image(::std::string* image); + + // .flyteidl.core.Resources resources = 3; + bool has_resources() const; + void clear_resources(); + static const int kResourcesFieldNumber = 3; + const ::flyteidl::core::Resources& resources() const; + ::flyteidl::core::Resources* release_resources(); + ::flyteidl::core::Resources* mutable_resources(); + void set_allocated_resources(::flyteidl::core::Resources* resources); // int32 replicas = 1; void clear_replicas(); @@ -330,9 +333,9 @@ class 
DistributedPyTorchTrainingReplicaSpec final : ::google::protobuf::int32 replicas() const; void set_replicas(::google::protobuf::int32 value); - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; void clear_restart_policy(); - static const int kRestartPolicyFieldNumber = 3; + static const int kRestartPolicyFieldNumber = 4; ::flyteidl::plugins::kubeflow::RestartPolicy restart_policy() const; void set_restart_policy(::flyteidl::plugins::kubeflow::RestartPolicy value); @@ -341,7 +344,8 @@ class DistributedPyTorchTrainingReplicaSpec final : class HasBitSetters; ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::ArenaStringPtr pod_template_name_; + ::google::protobuf::internal::ArenaStringPtr image_; + ::flyteidl::core::Resources* resources_; ::google::protobuf::int32 replicas_; int restart_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; @@ -505,20 +509,6 @@ inline void DistributedPyTorchTrainingTask::set_allocated_run_policy(::flyteidl: // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingTask.run_policy) } -// .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; -inline void DistributedPyTorchTrainingTask::clear_success_policy() { - success_policy_ = 0; -} -inline ::flyteidl::plugins::kubeflow::SuccessPolicy DistributedPyTorchTrainingTask::success_policy() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingTask.success_policy) - return static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(success_policy_); -} -inline void DistributedPyTorchTrainingTask::set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value) { - - success_policy_ = value; - // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingTask.success_policy) -} - // ------------------------------------------------------------------- // DistributedPyTorchTrainingReplicaSpec @@ -537,60 +527,105 @@ inline void DistributedPyTorchTrainingReplicaSpec::set_replicas(::google::protob // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.replicas) } -// string pod_template_name = 2; -inline void DistributedPyTorchTrainingReplicaSpec::clear_pod_template_name() { - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +// string image = 2; +inline void DistributedPyTorchTrainingReplicaSpec::clear_image() { + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); } -inline const ::std::string& DistributedPyTorchTrainingReplicaSpec::pod_template_name() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) - return pod_template_name_.GetNoArena(); +inline const ::std::string& DistributedPyTorchTrainingReplicaSpec::image() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) + return image_.GetNoArena(); } -inline void DistributedPyTorchTrainingReplicaSpec::set_pod_template_name(const ::std::string& value) { +inline void DistributedPyTorchTrainingReplicaSpec::set_image(const ::std::string& value) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); - // 
@@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) } #if LANG_CXX11 -inline void DistributedPyTorchTrainingReplicaSpec::set_pod_template_name(::std::string&& value) { +inline void DistributedPyTorchTrainingReplicaSpec::set_image(::std::string&& value) { - pod_template_name_.SetNoArena( + image_.SetNoArena( &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) } #endif -inline void DistributedPyTorchTrainingReplicaSpec::set_pod_template_name(const char* value) { +inline void DistributedPyTorchTrainingReplicaSpec::set_image(const char* value) { GOOGLE_DCHECK(value != nullptr); - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) } -inline void DistributedPyTorchTrainingReplicaSpec::set_pod_template_name(const char* value, size_t size) { +inline void DistributedPyTorchTrainingReplicaSpec::set_image(const char* value, size_t size) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) +} +inline ::std::string* DistributedPyTorchTrainingReplicaSpec::mutable_image() { + + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) + return image_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline ::std::string* DistributedPyTorchTrainingReplicaSpec::release_image() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) + + return image_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline void DistributedPyTorchTrainingReplicaSpec::set_allocated_image(::std::string* image) { + if (image != nullptr) { + + } else { + + } + image_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), image); + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.image) +} + +// .flyteidl.core.Resources resources = 3; +inline bool DistributedPyTorchTrainingReplicaSpec::has_resources() const { + return this != internal_default_instance() && resources_ != nullptr; } -inline ::std::string* DistributedPyTorchTrainingReplicaSpec::mutable_pod_template_name() { +inline const 
::flyteidl::core::Resources& DistributedPyTorchTrainingReplicaSpec::resources() const { + const ::flyteidl::core::Resources* p = resources_; + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.resources) + return p != nullptr ? *p : *reinterpret_cast( + &::flyteidl::core::_Resources_default_instance_); +} +inline ::flyteidl::core::Resources* DistributedPyTorchTrainingReplicaSpec::release_resources() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.resources) - // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) - return pod_template_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::flyteidl::core::Resources* temp = resources_; + resources_ = nullptr; + return temp; } -inline ::std::string* DistributedPyTorchTrainingReplicaSpec::release_pod_template_name() { - // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) +inline ::flyteidl::core::Resources* DistributedPyTorchTrainingReplicaSpec::mutable_resources() { - return pod_template_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (resources_ == nullptr) { + auto* p = CreateMaybeMessage<::flyteidl::core::Resources>(GetArenaNoVirtual()); + resources_ = p; + } + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.resources) + return resources_; } -inline void DistributedPyTorchTrainingReplicaSpec::set_allocated_pod_template_name(::std::string* pod_template_name) { - if (pod_template_name != nullptr) { +inline void DistributedPyTorchTrainingReplicaSpec::set_allocated_resources(::flyteidl::core::Resources* resources) { + ::google::protobuf::Arena* message_arena = GetArenaNoVirtual(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::google::protobuf::MessageLite*>(resources_); + } + if (resources) { + ::google::protobuf::Arena* submessage_arena = nullptr; + if (message_arena != submessage_arena) { + resources = ::google::protobuf::internal::GetOwnedMessage( + message_arena, resources, submessage_arena); + } } else { } - pod_template_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), pod_template_name); - // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.pod_template_name) + resources_ = resources; + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.resources) } -// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; +// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; inline void DistributedPyTorchTrainingReplicaSpec::clear_restart_policy() { restart_policy_ = 0; } diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.cc b/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.cc index 56c700d88..9227307c4 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.cc +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.cc @@ -16,8 +16,9 @@ // @@protoc_insertion_point(includes) #include +extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fcore_2ftasks_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto; extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto ::google::protobuf::internal::SCCInfo<0> 
scc_info_RunPolicy_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto; -extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_DistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto; +extern PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_DistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto; namespace flyteidl { namespace plugins { namespace kubeflow { @@ -59,8 +60,9 @@ static void InitDefaultsDistributedTensorflowTrainingReplicaSpec_flyteidl_2fplug ::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec::InitAsDefaultInstance(); } -::google::protobuf::internal::SCCInfo<0> scc_info_DistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto = - {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto}, {}}; +::google::protobuf::internal::SCCInfo<1> scc_info_DistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsDistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto}, { + &scc_info_Resources_flyteidl_2fcore_2ftasks_2eproto.base,}}; void InitDefaults_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto() { ::google::protobuf::internal::InitSCC(&scc_info_DistributedTensorflowTrainingTask_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto.base); @@ -81,19 +83,19 @@ const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fkubeflow_2ften PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask, ps_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask, chief_replicas_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask, run_policy_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask, success_policy_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, replicas_), - PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, pod_template_name_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, image_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, resources_), PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec, restart_policy_), }; static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask)}, - { 10, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec)}, + { 9, -1, sizeof(::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec)}, }; static ::google::protobuf::Message const * const file_default_instances[] = { @@ -109,38 +111,39 @@ 
::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_fl const char descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto[] = "\n*flyteidl/plugins/kubeflow/tensorflow.p" - "roto\022\031flyteidl.plugins.kubeflow\032&flyteid" - "l/plugins/kubeflow/common.proto\"\264\003\n!Dist" - "ributedTensorflowTrainingTask\022\\\n\017worker_" - "replicas\030\001 \001(\0132C.flyteidl.plugins.kubefl" - "ow.DistributedTensorflowTrainingReplicaS" - "pec\022X\n\013ps_replicas\030\002 \001(\0132C.flyteidl.plug" + "roto\022\031flyteidl.plugins.kubeflow\032\031flyteid" + "l/core/tasks.proto\032&flyteidl/plugins/kub" + "eflow/common.proto\"\362\002\n!DistributedTensor" + "flowTrainingTask\022\\\n\017worker_replicas\030\001 \001(" + "\0132C.flyteidl.plugins.kubeflow.Distribute" + "dTensorflowTrainingReplicaSpec\022X\n\013ps_rep" + "licas\030\002 \001(\0132C.flyteidl.plugins.kubeflow." + "DistributedTensorflowTrainingReplicaSpec" + "\022[\n\016chief_replicas\030\003 \001(\0132C.flyteidl.plug" "ins.kubeflow.DistributedTensorflowTraini" - "ngReplicaSpec\022[\n\016chief_replicas\030\003 \001(\0132C." - "flyteidl.plugins.kubeflow.DistributedTen" - "sorflowTrainingReplicaSpec\0228\n\nrun_policy" - "\030\004 \001(\0132$.flyteidl.plugins.kubeflow.RunPo" - "licy\022@\n\016success_policy\030\005 \001(\0162(.flyteidl." - "plugins.kubeflow.SuccessPolicy\"\231\001\n(Distr" - "ibutedTensorflowTrainingReplicaSpec\022\020\n\010r" - "eplicas\030\001 \001(\005\022\031\n\021pod_template_name\030\002 \001(\t" - "\022@\n\016restart_policy\030\003 \001(\0162(.flyteidl.plug" - "ins.kubeflow.RestartPolicyB9Z7github.com" - "/flyteorg/flyteidl/gen/pb-go/flyteidl/pl" - "uginsb\006proto3" + "ngReplicaSpec\0228\n\nrun_policy\030\004 \001(\0132$.flyt" + "eidl.plugins.kubeflow.RunPolicy\"\272\001\n(Dist" + "ributedTensorflowTrainingReplicaSpec\022\020\n\010" + "replicas\030\001 \001(\005\022\r\n\005image\030\002 \001(\t\022+\n\tresourc" + "es\030\003 \001(\0132\030.flyteidl.core.Resources\022@\n\016re" + "start_policy\030\004 \001(\0162(.flyteidl.plugins.ku" + "beflow.RestartPolicyB9Z7github.com/flyte" + "org/flyteidl/gen/pb-go/flyteidl/pluginsb" + "\006proto3" ; ::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto = { false, InitDefaults_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, descriptor_table_protodef_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, - "flyteidl/plugins/kubeflow/tensorflow.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, 773, + "flyteidl/plugins/kubeflow/tensorflow.proto", &assign_descriptors_table_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, 767, }; void AddDescriptors_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto() { - static constexpr ::google::protobuf::internal::InitFunc deps[1] = + static constexpr ::google::protobuf::internal::InitFunc deps[2] = { + ::AddDescriptors_flyteidl_2fcore_2ftasks_2eproto, ::AddDescriptors_flyteidl_2fplugins_2fkubeflow_2fcommon_2eproto, }; - ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, deps, 1); + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto, deps, 2); } // Force running AddDescriptors() at dynamic initialization time. 
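
The TensorFlow message gets the same reshaping: success_policy is removed from DistributedTensorflowTrainingTask, and each DistributedTensorflowTrainingReplicaSpec gains image and resources with restart_policy renumbered to field 4. A second minimal sketch, mirroring the PyTorch one above and round-tripping the message through the serializer and parser touched in this file (image strings are placeholders):

// sketch_tensorflow_task.cc -- illustrative sketch against the regenerated headers.
#include <string>

#include "flyteidl/plugins/kubeflow/tensorflow.pb.h"

int main() {
  flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask task;

  auto* worker = task.mutable_worker_replicas();
  worker->set_replicas(8);
  worker->set_image("registry.example.com/tf-trainer:latest");  // placeholder image
  worker->mutable_resources();  // attaches an (empty) flyteidl.core.Resources, serialized as field 3

  auto* chief = task.mutable_chief_replicas();
  chief->set_replicas(1);
  chief->set_image("registry.example.com/tf-trainer:latest");

  task.mutable_ps_replicas()->set_replicas(2);

  // Round-trip through the wire format exercised by the generated parser/serializer above.
  flyteidl::plugins::kubeflow::DistributedTensorflowTrainingTask copy;
  return copy.ParseFromString(task.SerializeAsString()) ? 0 : 1;
}
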
@@ -196,7 +199,6 @@ const int DistributedTensorflowTrainingTask::kWorkerReplicasFieldNumber; const int DistributedTensorflowTrainingTask::kPsReplicasFieldNumber; const int DistributedTensorflowTrainingTask::kChiefReplicasFieldNumber; const int DistributedTensorflowTrainingTask::kRunPolicyFieldNumber; -const int DistributedTensorflowTrainingTask::kSuccessPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 DistributedTensorflowTrainingTask::DistributedTensorflowTrainingTask() @@ -228,7 +230,6 @@ DistributedTensorflowTrainingTask::DistributedTensorflowTrainingTask(const Distr } else { run_policy_ = nullptr; } - success_policy_ = from.success_policy_; // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingTask) } @@ -236,8 +237,8 @@ void DistributedTensorflowTrainingTask::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedTensorflowTrainingTask_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto.base); ::memset(&worker_replicas_, 0, static_cast( - reinterpret_cast(&success_policy_) - - reinterpret_cast(&worker_replicas_)) + sizeof(success_policy_)); + reinterpret_cast(&run_policy_) - + reinterpret_cast(&worker_replicas_)) + sizeof(run_policy_)); } DistributedTensorflowTrainingTask::~DistributedTensorflowTrainingTask() { @@ -283,7 +284,6 @@ void DistributedTensorflowTrainingTask::Clear() { delete run_policy_; } run_policy_ = nullptr; - success_policy_ = 0; _internal_metadata_.Clear(); } @@ -352,14 +352,6 @@ const char* DistributedTensorflowTrainingTask::_InternalParse(const char* begin, {parser_till_end, object}, ptr - size, ptr)); break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - case 5: { - if (static_cast<::google::protobuf::uint8>(tag) != 40) goto handle_unusual; - ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); - msg->set_success_policy(static_cast<::flyteidl::plugins::kubeflow::SuccessPolicy>(val)); - GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - break; - } default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -434,20 +426,6 @@ bool DistributedTensorflowTrainingTask::MergePartialFromCodedStream( break; } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - case 5: { - if (static_cast< ::google::protobuf::uint8>(tag) == (40 & 0xFF)) { - int value = 0; - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( - input, &value))); - set_success_policy(static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(value)); - } else { - goto handle_unusual; - } - break; - } - default: { handle_unusual: if (tag == 0) { @@ -499,12 +477,6 @@ void DistributedTensorflowTrainingTask::SerializeWithCachedSizes( 4, HasBitSetters::run_policy(this), output); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - if (this->success_policy() != 0) { - ::google::protobuf::internal::WireFormatLite::WriteEnum( - 5, this->success_policy(), output); - } - if (_internal_metadata_.have_unknown_fields()) { ::google::protobuf::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -546,12 +518,6 @@ ::google::protobuf::uint8* DistributedTensorflowTrainingTask::InternalSerializeW 4, HasBitSetters::run_policy(this), target); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - if (this->success_policy() != 0) { - target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 5, this->success_policy(), 
target); - } - if (_internal_metadata_.have_unknown_fields()) { target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -601,12 +567,6 @@ size_t DistributedTensorflowTrainingTask::ByteSizeLong() const { *run_policy_); } - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - if (this->success_policy() != 0) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::EnumSize(this->success_policy()); - } - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -646,9 +606,6 @@ void DistributedTensorflowTrainingTask::MergeFrom(const DistributedTensorflowTra if (from.has_run_policy()) { mutable_run_policy()->::flyteidl::plugins::kubeflow::RunPolicy::MergeFrom(from.run_policy()); } - if (from.success_policy() != 0) { - set_success_policy(from.success_policy()); - } } void DistributedTensorflowTrainingTask::CopyFrom(const ::google::protobuf::Message& from) { @@ -680,7 +637,6 @@ void DistributedTensorflowTrainingTask::InternalSwap(DistributedTensorflowTraini swap(ps_replicas_, other->ps_replicas_); swap(chief_replicas_, other->chief_replicas_); swap(run_policy_, other->run_policy_); - swap(success_policy_, other->success_policy_); } ::google::protobuf::Metadata DistributedTensorflowTrainingTask::GetMetadata() const { @@ -692,14 +648,28 @@ ::google::protobuf::Metadata DistributedTensorflowTrainingTask::GetMetadata() co // =================================================================== void DistributedTensorflowTrainingReplicaSpec::InitAsDefaultInstance() { + ::flyteidl::plugins::kubeflow::_DistributedTensorflowTrainingReplicaSpec_default_instance_._instance.get_mutable()->resources_ = const_cast< ::flyteidl::core::Resources*>( + ::flyteidl::core::Resources::internal_default_instance()); } class DistributedTensorflowTrainingReplicaSpec::HasBitSetters { public: + static const ::flyteidl::core::Resources& resources(const DistributedTensorflowTrainingReplicaSpec* msg); }; +const ::flyteidl::core::Resources& +DistributedTensorflowTrainingReplicaSpec::HasBitSetters::resources(const DistributedTensorflowTrainingReplicaSpec* msg) { + return *msg->resources_; +} +void DistributedTensorflowTrainingReplicaSpec::clear_resources() { + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; +} #if !defined(_MSC_VER) || _MSC_VER >= 1900 const int DistributedTensorflowTrainingReplicaSpec::kReplicasFieldNumber; -const int DistributedTensorflowTrainingReplicaSpec::kPodTemplateNameFieldNumber; +const int DistributedTensorflowTrainingReplicaSpec::kImageFieldNumber; +const int DistributedTensorflowTrainingReplicaSpec::kResourcesFieldNumber; const int DistributedTensorflowTrainingReplicaSpec::kRestartPolicyFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 @@ -712,9 +682,14 @@ DistributedTensorflowTrainingReplicaSpec::DistributedTensorflowTrainingReplicaSp : ::google::protobuf::Message(), _internal_metadata_(nullptr) { _internal_metadata_.MergeFrom(from._internal_metadata_); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (from.pod_template_name().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (from.image().size() > 0) { + 
image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if (from.has_resources()) { + resources_ = new ::flyteidl::core::Resources(*from.resources_); + } else { + resources_ = nullptr; } ::memcpy(&replicas_, &from.replicas_, static_cast(reinterpret_cast(&restart_policy_) - @@ -725,10 +700,10 @@ DistributedTensorflowTrainingReplicaSpec::DistributedTensorflowTrainingReplicaSp void DistributedTensorflowTrainingReplicaSpec::SharedCtor() { ::google::protobuf::internal::InitSCC( &scc_info_DistributedTensorflowTrainingReplicaSpec_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto.base); - pod_template_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - ::memset(&replicas_, 0, static_cast( + image_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::memset(&resources_, 0, static_cast( reinterpret_cast(&restart_policy_) - - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); + reinterpret_cast(&resources_)) + sizeof(restart_policy_)); } DistributedTensorflowTrainingReplicaSpec::~DistributedTensorflowTrainingReplicaSpec() { @@ -737,7 +712,8 @@ DistributedTensorflowTrainingReplicaSpec::~DistributedTensorflowTrainingReplicaS } void DistributedTensorflowTrainingReplicaSpec::SharedDtor() { - pod_template_name_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (this != internal_default_instance()) delete resources_; } void DistributedTensorflowTrainingReplicaSpec::SetCachedSize(int size) const { @@ -755,7 +731,11 @@ void DistributedTensorflowTrainingReplicaSpec::Clear() { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (GetArenaNoVirtual() == nullptr && resources_ != nullptr) { + delete resources_; + } + resources_ = nullptr; ::memset(&replicas_, 0, static_cast( reinterpret_cast(&restart_policy_) - reinterpret_cast(&replicas_)) + sizeof(restart_policy_)); @@ -782,13 +762,13 @@ const char* DistributedTensorflowTrainingReplicaSpec::_InternalParse(const char* GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast<::google::protobuf::uint8>(tag) != 18) goto handle_unusual; ptr = ::google::protobuf::io::ReadSize(ptr, &size); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); - ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name"); - object = msg->mutable_pod_template_name(); + ctx->extra_parse_data().SetFieldName("flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image"); + object = msg->mutable_image(); if (size > end - ptr + ::google::protobuf::internal::ParseContext::kSlopBytes) { parser_till_end = ::google::protobuf::internal::GreedyStringParserUTF8; goto string_till_end; @@ -798,9 +778,22 @@ const char* DistributedTensorflowTrainingReplicaSpec::_InternalParse(const char* ptr += size; break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast<::google::protobuf::uint8>(tag) != 24) goto handle_unusual; + if (static_cast<::google::protobuf::uint8>(tag) != 26) goto handle_unusual; + ptr = 
::google::protobuf::io::ReadSize(ptr, &size); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + parser_till_end = ::flyteidl::core::Resources::_InternalParse; + object = msg->mutable_resources(); + if (size > end - ptr) goto len_delim_till_end; + ptr += size; + GOOGLE_PROTOBUF_PARSER_ASSERT(ctx->ParseExactRange( + {parser_till_end, object}, ptr - size, ptr)); + break; + } + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast<::google::protobuf::uint8>(tag) != 32) goto handle_unusual; ::google::protobuf::uint64 val = ::google::protobuf::internal::ReadVarint(&ptr); msg->set_restart_policy(static_cast<::flyteidl::plugins::kubeflow::RestartPolicy>(val)); GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); @@ -853,24 +846,35 @@ bool DistributedTensorflowTrainingReplicaSpec::MergePartialFromCodedStream( break; } - // string pod_template_name = 2; + // string image = 2; case 2: { if (static_cast< ::google::protobuf::uint8>(tag) == (18 & 0xFF)) { DO_(::google::protobuf::internal::WireFormatLite::ReadString( - input, this->mutable_pod_template_name())); + input, this->mutable_image())); DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::PARSE, - "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name")); + "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image")); } else { goto handle_unusual; } break; } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; case 3: { - if (static_cast< ::google::protobuf::uint8>(tag) == (24 & 0xFF)) { + if (static_cast< ::google::protobuf::uint8>(tag) == (26 & 0xFF)) { + DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( + input, mutable_resources())); + } else { + goto handle_unusual; + } + break; + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; + case 4: { + if (static_cast< ::google::protobuf::uint8>(tag) == (32 & 0xFF)) { int value = 0; DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( @@ -914,20 +918,26 @@ void DistributedTensorflowTrainingReplicaSpec::SerializeWithCachedSizes( ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->replicas(), output); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image"); ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( - 2, this->pod_template_name(), output); + 2, this->image(), output); + } + + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, HasBitSetters::resources(this), output); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() 
!= 0) { ::google::protobuf::internal::WireFormatLite::WriteEnum( - 3, this->restart_policy(), output); + 4, this->restart_policy(), output); } if (_internal_metadata_.have_unknown_fields()) { @@ -948,21 +958,28 @@ ::google::protobuf::uint8* DistributedTensorflowTrainingReplicaSpec::InternalSer target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->replicas(), target); } - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->pod_template_name().data(), static_cast(this->pod_template_name().length()), + this->image().data(), static_cast(this->image().length()), ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name"); + "flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image"); target = ::google::protobuf::internal::WireFormatLite::WriteStringToArray( - 2, this->pod_template_name(), target); + 2, this->image(), target); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + target = ::google::protobuf::internal::WireFormatLite:: + InternalWriteMessageToArray( + 3, HasBitSetters::resources(this), target); + } + + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( - 3, this->restart_policy(), target); + 4, this->restart_policy(), target); } if (_internal_metadata_.have_unknown_fields()) { @@ -986,11 +1003,18 @@ size_t DistributedTensorflowTrainingReplicaSpec::ByteSizeLong() const { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // string pod_template_name = 2; - if (this->pod_template_name().size() > 0) { + // string image = 2; + if (this->image().size() > 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::StringSize( - this->pod_template_name()); + this->image()); + } + + // .flyteidl.core.Resources resources = 3; + if (this->has_resources()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSize( + *resources_); } // int32 replicas = 1; @@ -1000,7 +1024,7 @@ size_t DistributedTensorflowTrainingReplicaSpec::ByteSizeLong() const { this->replicas()); } - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; if (this->restart_policy() != 0) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::EnumSize(this->restart_policy()); @@ -1033,9 +1057,12 @@ void DistributedTensorflowTrainingReplicaSpec::MergeFrom(const DistributedTensor ::google::protobuf::uint32 cached_has_bits = 0; (void) cached_has_bits; - if (from.pod_template_name().size() > 0) { + if (from.image().size() > 0) { - pod_template_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.pod_template_name_); + image_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.image_); + } + if (from.has_resources()) { + mutable_resources()->::flyteidl::core::Resources::MergeFrom(from.resources()); } if (from.replicas() != 0) { set_replicas(from.replicas()); @@ -1070,8 +1097,9 @@ void DistributedTensorflowTrainingReplicaSpec::Swap(DistributedTensorflowTrainin void 
DistributedTensorflowTrainingReplicaSpec::InternalSwap(DistributedTensorflowTrainingReplicaSpec* other) { using std::swap; _internal_metadata_.Swap(&other->_internal_metadata_); - pod_template_name_.Swap(&other->pod_template_name_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.Swap(&other->image_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + swap(resources_, other->resources_); swap(replicas_, other->replicas_); swap(restart_policy_, other->restart_policy_); } diff --git a/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.h b/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.h index 933f28bef..9839ca481 100644 --- a/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.h +++ b/gen/pb-cpp/flyteidl/plugins/kubeflow/tensorflow.pb.h @@ -31,6 +31,7 @@ #include // IWYU pragma: export #include // IWYU pragma: export #include +#include "flyteidl/core/tasks.pb.h" #include "flyteidl/plugins/kubeflow/common.pb.h" // @@protoc_insertion_point(includes) #include @@ -204,12 +205,6 @@ class DistributedTensorflowTrainingTask final : ::flyteidl::plugins::kubeflow::RunPolicy* mutable_run_policy(); void set_allocated_run_policy(::flyteidl::plugins::kubeflow::RunPolicy* run_policy); - // .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - void clear_success_policy(); - static const int kSuccessPolicyFieldNumber = 5; - ::flyteidl::plugins::kubeflow::SuccessPolicy success_policy() const; - void set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value); - // @@protoc_insertion_point(class_scope:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingTask) private: class HasBitSetters; @@ -219,7 +214,6 @@ class DistributedTensorflowTrainingTask final : ::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec* ps_replicas_; ::flyteidl::plugins::kubeflow::DistributedTensorflowTrainingReplicaSpec* chief_replicas_; ::flyteidl::plugins::kubeflow::RunPolicy* run_policy_; - int success_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; friend struct ::TableStruct_flyteidl_2fplugins_2fkubeflow_2ftensorflow_2eproto; }; @@ -320,19 +314,28 @@ class DistributedTensorflowTrainingReplicaSpec final : // accessors ------------------------------------------------------- - // string pod_template_name = 2; - void clear_pod_template_name(); - static const int kPodTemplateNameFieldNumber = 2; - const ::std::string& pod_template_name() const; - void set_pod_template_name(const ::std::string& value); + // string image = 2; + void clear_image(); + static const int kImageFieldNumber = 2; + const ::std::string& image() const; + void set_image(const ::std::string& value); #if LANG_CXX11 - void set_pod_template_name(::std::string&& value); + void set_image(::std::string&& value); #endif - void set_pod_template_name(const char* value); - void set_pod_template_name(const char* value, size_t size); - ::std::string* mutable_pod_template_name(); - ::std::string* release_pod_template_name(); - void set_allocated_pod_template_name(::std::string* pod_template_name); + void set_image(const char* value); + void set_image(const char* value, size_t size); + ::std::string* mutable_image(); + ::std::string* release_image(); + void set_allocated_image(::std::string* image); + + // .flyteidl.core.Resources resources = 3; + bool has_resources() const; + void clear_resources(); + static const int kResourcesFieldNumber = 3; + const ::flyteidl::core::Resources& resources() const; + ::flyteidl::core::Resources* release_resources(); + 
::flyteidl::core::Resources* mutable_resources(); + void set_allocated_resources(::flyteidl::core::Resources* resources); // int32 replicas = 1; void clear_replicas(); @@ -340,9 +343,9 @@ class DistributedTensorflowTrainingReplicaSpec final : ::google::protobuf::int32 replicas() const; void set_replicas(::google::protobuf::int32 value); - // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + // .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; void clear_restart_policy(); - static const int kRestartPolicyFieldNumber = 3; + static const int kRestartPolicyFieldNumber = 4; ::flyteidl::plugins::kubeflow::RestartPolicy restart_policy() const; void set_restart_policy(::flyteidl::plugins::kubeflow::RestartPolicy value); @@ -351,7 +354,8 @@ class DistributedTensorflowTrainingReplicaSpec final : class HasBitSetters; ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::ArenaStringPtr pod_template_name_; + ::google::protobuf::internal::ArenaStringPtr image_; + ::flyteidl::core::Resources* resources_; ::google::protobuf::int32 replicas_; int restart_policy_; mutable ::google::protobuf::internal::CachedSize _cached_size_; @@ -566,20 +570,6 @@ inline void DistributedTensorflowTrainingTask::set_allocated_run_policy(::flytei // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingTask.run_policy) } -// .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; -inline void DistributedTensorflowTrainingTask::clear_success_policy() { - success_policy_ = 0; -} -inline ::flyteidl::plugins::kubeflow::SuccessPolicy DistributedTensorflowTrainingTask::success_policy() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingTask.success_policy) - return static_cast< ::flyteidl::plugins::kubeflow::SuccessPolicy >(success_policy_); -} -inline void DistributedTensorflowTrainingTask::set_success_policy(::flyteidl::plugins::kubeflow::SuccessPolicy value) { - - success_policy_ = value; - // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingTask.success_policy) -} - // ------------------------------------------------------------------- // DistributedTensorflowTrainingReplicaSpec @@ -598,60 +588,105 @@ inline void DistributedTensorflowTrainingReplicaSpec::set_replicas(::google::pro // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.replicas) } -// string pod_template_name = 2; -inline void DistributedTensorflowTrainingReplicaSpec::clear_pod_template_name() { - pod_template_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +// string image = 2; +inline void DistributedTensorflowTrainingReplicaSpec::clear_image() { + image_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); } -inline const ::std::string& DistributedTensorflowTrainingReplicaSpec::pod_template_name() const { - // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) - return pod_template_name_.GetNoArena(); +inline const ::std::string& DistributedTensorflowTrainingReplicaSpec::image() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) + return image_.GetNoArena(); } -inline void DistributedTensorflowTrainingReplicaSpec::set_pod_template_name(const ::std::string& value) { +inline void 
DistributedTensorflowTrainingReplicaSpec::set_image(const ::std::string& value) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) } #if LANG_CXX11 -inline void DistributedTensorflowTrainingReplicaSpec::set_pod_template_name(::std::string&& value) { +inline void DistributedTensorflowTrainingReplicaSpec::set_image(::std::string&& value) { - pod_template_name_.SetNoArena( + image_.SetNoArena( &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_rvalue:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) } #endif -inline void DistributedTensorflowTrainingReplicaSpec::set_pod_template_name(const char* value) { +inline void DistributedTensorflowTrainingReplicaSpec::set_image(const char* value) { GOOGLE_DCHECK(value != nullptr); - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) } -inline void DistributedTensorflowTrainingReplicaSpec::set_pod_template_name(const char* value, size_t size) { +inline void DistributedTensorflowTrainingReplicaSpec::set_image(const char* value, size_t size) { - pod_template_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), + image_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) + // @@protoc_insertion_point(field_set_pointer:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) +} +inline ::std::string* DistributedTensorflowTrainingReplicaSpec::mutable_image() { + + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) + return image_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline ::std::string* DistributedTensorflowTrainingReplicaSpec::release_image() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) + + return image_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline void DistributedTensorflowTrainingReplicaSpec::set_allocated_image(::std::string* image) { + if (image != nullptr) { + + } else { + + } + image_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), image); + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.image) +} + +// .flyteidl.core.Resources resources = 3; +inline bool 
DistributedTensorflowTrainingReplicaSpec::has_resources() const { + return this != internal_default_instance() && resources_ != nullptr; } -inline ::std::string* DistributedTensorflowTrainingReplicaSpec::mutable_pod_template_name() { +inline const ::flyteidl::core::Resources& DistributedTensorflowTrainingReplicaSpec::resources() const { + const ::flyteidl::core::Resources* p = resources_; + // @@protoc_insertion_point(field_get:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.resources) + return p != nullptr ? *p : *reinterpret_cast( + &::flyteidl::core::_Resources_default_instance_); +} +inline ::flyteidl::core::Resources* DistributedTensorflowTrainingReplicaSpec::release_resources() { + // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.resources) - // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) - return pod_template_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::flyteidl::core::Resources* temp = resources_; + resources_ = nullptr; + return temp; } -inline ::std::string* DistributedTensorflowTrainingReplicaSpec::release_pod_template_name() { - // @@protoc_insertion_point(field_release:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) +inline ::flyteidl::core::Resources* DistributedTensorflowTrainingReplicaSpec::mutable_resources() { - return pod_template_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (resources_ == nullptr) { + auto* p = CreateMaybeMessage<::flyteidl::core::Resources>(GetArenaNoVirtual()); + resources_ = p; + } + // @@protoc_insertion_point(field_mutable:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.resources) + return resources_; } -inline void DistributedTensorflowTrainingReplicaSpec::set_allocated_pod_template_name(::std::string* pod_template_name) { - if (pod_template_name != nullptr) { +inline void DistributedTensorflowTrainingReplicaSpec::set_allocated_resources(::flyteidl::core::Resources* resources) { + ::google::protobuf::Arena* message_arena = GetArenaNoVirtual(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::google::protobuf::MessageLite*>(resources_); + } + if (resources) { + ::google::protobuf::Arena* submessage_arena = nullptr; + if (message_arena != submessage_arena) { + resources = ::google::protobuf::internal::GetOwnedMessage( + message_arena, resources, submessage_arena); + } } else { } - pod_template_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), pod_template_name); - // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.pod_template_name) + resources_ = resources; + // @@protoc_insertion_point(field_set_allocated:flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.resources) } -// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; +// .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; inline void DistributedTensorflowTrainingReplicaSpec::clear_restart_policy() { restart_policy_ = 0; } diff --git a/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.cc b/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.cc new file mode 100644 index 000000000..8a312292c --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.cc @@ -0,0 +1,24 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. 
+// source: flyteidl/plugins/mpi.proto + +#include "flyteidl/plugins/mpi.pb.h" +#include "flyteidl/plugins/mpi.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flyteidl { +namespace plugins { + +} // namespace flyteidl +} // namespace plugins + diff --git a/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.h b/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.h new file mode 100644 index 000000000..6725f0e0e --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/mpi.grpc.pb.h @@ -0,0 +1,47 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flyteidl/plugins/mpi.proto +#ifndef GRPC_flyteidl_2fplugins_2fmpi_2eproto__INCLUDED +#define GRPC_flyteidl_2fplugins_2fmpi_2eproto__INCLUDED + +#include "flyteidl/plugins/mpi.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace grpc_impl { +class Channel; +class CompletionQueue; +class ServerCompletionQueue; +} // namespace grpc_impl + +namespace grpc { +namespace experimental { +template +class MessageAllocator; +} // namespace experimental +} // namespace grpc_impl + +namespace grpc { +class ServerContext; +} // namespace grpc + +namespace flyteidl { +namespace plugins { + +} // namespace plugins +} // namespace flyteidl + + +#endif // GRPC_flyteidl_2fplugins_2fmpi_2eproto__INCLUDED diff --git a/gen/pb-cpp/flyteidl/plugins/mpi.pb.cc b/gen/pb-cpp/flyteidl/plugins/mpi.pb.cc new file mode 100644 index 000000000..87aa4c458 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/mpi.pb.cc @@ -0,0 +1,461 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/mpi.proto + +#include "flyteidl/plugins/mpi.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +namespace flyteidl { +namespace plugins { +class DistributedMPITrainingTaskDefaultTypeInternal { + public: + ::google::protobuf::internal::ExplicitlyConstructed _instance; +} _DistributedMPITrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +static void InitDefaultsDistributedMPITrainingTask_flyteidl_2fplugins_2fmpi_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::flyteidl::plugins::_DistributedMPITrainingTask_default_instance_; + new (ptr) ::flyteidl::plugins::DistributedMPITrainingTask(); + ::google::protobuf::internal::OnShutdownDestroyMessage(ptr); + } + ::flyteidl::plugins::DistributedMPITrainingTask::InitAsDefaultInstance(); +} + +::google::protobuf::internal::SCCInfo<0> scc_info_DistributedMPITrainingTask_flyteidl_2fplugins_2fmpi_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedMPITrainingTask_flyteidl_2fplugins_2fmpi_2eproto}, {}}; + +void InitDefaults_flyteidl_2fplugins_2fmpi_2eproto() { + ::google::protobuf::internal::InitSCC(&scc_info_DistributedMPITrainingTask_flyteidl_2fplugins_2fmpi_2eproto.base); +} + +::google::protobuf::Metadata file_level_metadata_flyteidl_2fplugins_2fmpi_2eproto[1]; +constexpr ::google::protobuf::EnumDescriptor const** file_level_enum_descriptors_flyteidl_2fplugins_2fmpi_2eproto = nullptr; +constexpr ::google::protobuf::ServiceDescriptor const** file_level_service_descriptors_flyteidl_2fplugins_2fmpi_2eproto = nullptr; + +const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fmpi_2eproto::offsets[] 
PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedMPITrainingTask, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedMPITrainingTask, num_workers_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedMPITrainingTask, num_launcher_replicas_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedMPITrainingTask, slots_), +}; +static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, sizeof(::flyteidl::plugins::DistributedMPITrainingTask)}, +}; + +static ::google::protobuf::Message const * const file_default_instances[] = { + reinterpret_cast(&::flyteidl::plugins::_DistributedMPITrainingTask_default_instance_), +}; + +::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_flyteidl_2fplugins_2fmpi_2eproto = { + {}, AddDescriptors_flyteidl_2fplugins_2fmpi_2eproto, "flyteidl/plugins/mpi.proto", schemas, + file_default_instances, TableStruct_flyteidl_2fplugins_2fmpi_2eproto::offsets, + file_level_metadata_flyteidl_2fplugins_2fmpi_2eproto, 1, file_level_enum_descriptors_flyteidl_2fplugins_2fmpi_2eproto, file_level_service_descriptors_flyteidl_2fplugins_2fmpi_2eproto, +}; + +const char descriptor_table_protodef_flyteidl_2fplugins_2fmpi_2eproto[] = + "\n\032flyteidl/plugins/mpi.proto\022\020flyteidl.p" + "lugins\"_\n\032DistributedMPITrainingTask\022\023\n\013" + "num_workers\030\001 \001(\005\022\035\n\025num_launcher_replic" + "as\030\002 \001(\005\022\r\n\005slots\030\003 \001(\005B9Z7github.com/fl" + "yteorg/flyteidl/gen/pb-go/flyteidl/plugi" + "nsb\006proto3" + ; +::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fmpi_2eproto = { + false, InitDefaults_flyteidl_2fplugins_2fmpi_2eproto, + descriptor_table_protodef_flyteidl_2fplugins_2fmpi_2eproto, + "flyteidl/plugins/mpi.proto", &assign_descriptors_table_flyteidl_2fplugins_2fmpi_2eproto, 210, +}; + +void AddDescriptors_flyteidl_2fplugins_2fmpi_2eproto() { + static constexpr ::google::protobuf::internal::InitFunc deps[1] = + { + }; + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fmpi_2eproto, deps, 0); +} + +// Force running AddDescriptors() at dynamic initialization time. 
+static bool dynamic_init_dummy_flyteidl_2fplugins_2fmpi_2eproto = []() { AddDescriptors_flyteidl_2fplugins_2fmpi_2eproto(); return true; }(); +namespace flyteidl { +namespace plugins { + +// =================================================================== + +void DistributedMPITrainingTask::InitAsDefaultInstance() { +} +class DistributedMPITrainingTask::HasBitSetters { + public: +}; + +#if !defined(_MSC_VER) || _MSC_VER >= 1900 +const int DistributedMPITrainingTask::kNumWorkersFieldNumber; +const int DistributedMPITrainingTask::kNumLauncherReplicasFieldNumber; +const int DistributedMPITrainingTask::kSlotsFieldNumber; +#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 + +DistributedMPITrainingTask::DistributedMPITrainingTask() + : ::google::protobuf::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:flyteidl.plugins.DistributedMPITrainingTask) +} +DistributedMPITrainingTask::DistributedMPITrainingTask(const DistributedMPITrainingTask& from) + : ::google::protobuf::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::memcpy(&num_workers_, &from.num_workers_, + static_cast(reinterpret_cast(&slots_) - + reinterpret_cast(&num_workers_)) + sizeof(slots_)); + // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.DistributedMPITrainingTask) +} + +void DistributedMPITrainingTask::SharedCtor() { + ::memset(&num_workers_, 0, static_cast( + reinterpret_cast(&slots_) - + reinterpret_cast(&num_workers_)) + sizeof(slots_)); +} + +DistributedMPITrainingTask::~DistributedMPITrainingTask() { + // @@protoc_insertion_point(destructor:flyteidl.plugins.DistributedMPITrainingTask) + SharedDtor(); +} + +void DistributedMPITrainingTask::SharedDtor() { +} + +void DistributedMPITrainingTask::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const DistributedMPITrainingTask& DistributedMPITrainingTask::default_instance() { + ::google::protobuf::internal::InitSCC(&::scc_info_DistributedMPITrainingTask_flyteidl_2fplugins_2fmpi_2eproto.base); + return *internal_default_instance(); +} + + +void DistributedMPITrainingTask::Clear() { +// @@protoc_insertion_point(message_clear_start:flyteidl.plugins.DistributedMPITrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + ::memset(&num_workers_, 0, static_cast( + reinterpret_cast(&slots_) - + reinterpret_cast(&num_workers_)) + sizeof(slots_)); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* DistributedMPITrainingTask::_InternalParse(const char* begin, const char* end, void* object, + ::google::protobuf::internal::ParseContext* ctx) { + auto msg = static_cast(object); + ::google::protobuf::int32 size; (void)size; + int depth; (void)depth; + ::google::protobuf::uint32 tag; + ::google::protobuf::internal::ParseFunc parser_till_end; (void)parser_till_end; + auto ptr = begin; + while (ptr < end) { + ptr = ::google::protobuf::io::Parse32(ptr, &tag); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + switch (tag >> 3) { + // int32 num_workers = 1; + case 1: { + if (static_cast<::google::protobuf::uint8>(tag) != 8) goto handle_unusual; + msg->set_num_workers(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + // int32 num_launcher_replicas = 2; + case 2: { + if (static_cast<::google::protobuf::uint8>(tag) != 16) goto handle_unusual; + 
msg->set_num_launcher_replicas(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + // int32 slots = 3; + case 3: { + if (static_cast<::google::protobuf::uint8>(tag) != 24) goto handle_unusual; + msg->set_slots(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->EndGroup(tag); + return ptr; + } + auto res = UnknownFieldParse(tag, {_InternalParse, msg}, + ptr, end, msg->_internal_metadata_.mutable_unknown_fields(), ctx); + ptr = res.first; + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr); + if (res.second) return ptr; + } + } // switch + } // while + return ptr; +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool DistributedMPITrainingTask::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:flyteidl.plugins.DistributedMPITrainingTask) + for (;;) { + ::std::pair<::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // int32 num_workers = 1; + case 1: { + if (static_cast< ::google::protobuf::uint8>(tag) == (8 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &num_workers_))); + } else { + goto handle_unusual; + } + break; + } + + // int32 num_launcher_replicas = 2; + case 2: { + if (static_cast< ::google::protobuf::uint8>(tag) == (16 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &num_launcher_replicas_))); + } else { + goto handle_unusual; + } + break; + } + + // int32 slots = 3; + case 3: { + if (static_cast< ::google::protobuf::uint8>(tag) == (24 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &slots_))); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:flyteidl.plugins.DistributedMPITrainingTask) + return true; +failure: + // @@protoc_insertion_point(parse_failure:flyteidl.plugins.DistributedMPITrainingTask) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void DistributedMPITrainingTask::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:flyteidl.plugins.DistributedMPITrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 num_workers = 1; + if (this->num_workers() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->num_workers(), output); + } + + // int32 num_launcher_replicas = 2; + if (this->num_launcher_replicas() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->num_launcher_replicas(), output); + } + + 
// int32 slots = 3; + if (this->slots() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->slots(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:flyteidl.plugins.DistributedMPITrainingTask) +} + +::google::protobuf::uint8* DistributedMPITrainingTask::InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:flyteidl.plugins.DistributedMPITrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 num_workers = 1; + if (this->num_workers() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->num_workers(), target); + } + + // int32 num_launcher_replicas = 2; + if (this->num_launcher_replicas() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(2, this->num_launcher_replicas(), target); + } + + // int32 slots = 3; + if (this->slots() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(3, this->slots(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:flyteidl.plugins.DistributedMPITrainingTask) + return target; +} + +size_t DistributedMPITrainingTask::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flyteidl.plugins.DistributedMPITrainingTask) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // int32 num_workers = 1; + if (this->num_workers() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->num_workers()); + } + + // int32 num_launcher_replicas = 2; + if (this->num_launcher_replicas() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->num_launcher_replicas()); + } + + // int32 slots = 3; + if (this->slots() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->slots()); + } + + int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void DistributedMPITrainingTask::MergeFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:flyteidl.plugins.DistributedMPITrainingTask) + GOOGLE_DCHECK_NE(&from, this); + const DistributedMPITrainingTask* source = + ::google::protobuf::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:flyteidl.plugins.DistributedMPITrainingTask) + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:flyteidl.plugins.DistributedMPITrainingTask) + MergeFrom(*source); + } +} + +void DistributedMPITrainingTask::MergeFrom(const DistributedMPITrainingTask& from) { +// 
@@protoc_insertion_point(class_specific_merge_from_start:flyteidl.plugins.DistributedMPITrainingTask) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.num_workers() != 0) { + set_num_workers(from.num_workers()); + } + if (from.num_launcher_replicas() != 0) { + set_num_launcher_replicas(from.num_launcher_replicas()); + } + if (from.slots() != 0) { + set_slots(from.slots()); + } +} + +void DistributedMPITrainingTask::CopyFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:flyteidl.plugins.DistributedMPITrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void DistributedMPITrainingTask::CopyFrom(const DistributedMPITrainingTask& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flyteidl.plugins.DistributedMPITrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool DistributedMPITrainingTask::IsInitialized() const { + return true; +} + +void DistributedMPITrainingTask::Swap(DistributedMPITrainingTask* other) { + if (other == this) return; + InternalSwap(other); +} +void DistributedMPITrainingTask::InternalSwap(DistributedMPITrainingTask* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + swap(num_workers_, other->num_workers_); + swap(num_launcher_replicas_, other->num_launcher_replicas_); + swap(slots_, other->slots_); +} + +::google::protobuf::Metadata DistributedMPITrainingTask::GetMetadata() const { + ::google::protobuf::internal::AssignDescriptors(&::assign_descriptors_table_flyteidl_2fplugins_2fmpi_2eproto); + return ::file_level_metadata_flyteidl_2fplugins_2fmpi_2eproto[kIndexInFileMessages]; +} + + +// @@protoc_insertion_point(namespace_scope) +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> PROTOBUF_NOINLINE ::flyteidl::plugins::DistributedMPITrainingTask* Arena::CreateMaybeMessage< ::flyteidl::plugins::DistributedMPITrainingTask >(Arena* arena) { + return Arena::CreateInternal< ::flyteidl::plugins::DistributedMPITrainingTask >(arena); +} +} // namespace protobuf +} // namespace google + +// @@protoc_insertion_point(global_scope) +#include diff --git a/gen/pb-cpp/flyteidl/plugins/mpi.pb.h b/gen/pb-cpp/flyteidl/plugins/mpi.pb.h new file mode 100644 index 000000000..119bb2ca9 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/mpi.pb.h @@ -0,0 +1,257 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/mpi.proto + +#ifndef PROTOBUF_INCLUDED_flyteidl_2fplugins_2fmpi_2eproto +#define PROTOBUF_INCLUDED_flyteidl_2fplugins_2fmpi_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3007000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3007000 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fmpi_2eproto + +// Internal implementation detail -- do not use these members. +struct TableStruct_flyteidl_2fplugins_2fmpi_2eproto { + static const ::google::protobuf::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::AuxillaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::ParseTable schema[1] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::FieldMetadata field_metadata[]; + static const ::google::protobuf::internal::SerializationTable serialization_table[]; + static const ::google::protobuf::uint32 offsets[]; +}; +void AddDescriptors_flyteidl_2fplugins_2fmpi_2eproto(); +namespace flyteidl { +namespace plugins { +class DistributedMPITrainingTask; +class DistributedMPITrainingTaskDefaultTypeInternal; +extern DistributedMPITrainingTaskDefaultTypeInternal _DistributedMPITrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> ::flyteidl::plugins::DistributedMPITrainingTask* Arena::CreateMaybeMessage<::flyteidl::plugins::DistributedMPITrainingTask>(Arena*); +} // namespace protobuf +} // namespace google +namespace flyteidl { +namespace plugins { + +// =================================================================== + +class DistributedMPITrainingTask final : + public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:flyteidl.plugins.DistributedMPITrainingTask) */ { + public: + DistributedMPITrainingTask(); + virtual ~DistributedMPITrainingTask(); + + DistributedMPITrainingTask(const DistributedMPITrainingTask& from); + + inline DistributedMPITrainingTask& operator=(const DistributedMPITrainingTask& from) { + CopyFrom(from); + return *this; + } + #if LANG_CXX11 + DistributedMPITrainingTask(DistributedMPITrainingTask&& from) noexcept + : DistributedMPITrainingTask() { + *this = ::std::move(from); + } + + inline DistributedMPITrainingTask& operator=(DistributedMPITrainingTask&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + #endif + static const ::google::protobuf::Descriptor* descriptor() { + return default_instance().GetDescriptor(); + } + static const DistributedMPITrainingTask& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const DistributedMPITrainingTask* internal_default_instance() { + return reinterpret_cast( + &_DistributedMPITrainingTask_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + void Swap(DistributedMPITrainingTask* other); + friend void swap(DistributedMPITrainingTask& a, DistributedMPITrainingTask& b) { + a.Swap(&b); + } + + // implements Message ---------------------------------------------- + + inline DistributedMPITrainingTask* New() const final { + return CreateMaybeMessage(nullptr); + } + + DistributedMPITrainingTask* New(::google::protobuf::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::google::protobuf::Message& from) final; + void MergeFrom(const 
::google::protobuf::Message& from) final; + void CopyFrom(const DistributedMPITrainingTask& from); + void MergeFrom(const DistributedMPITrainingTask& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + static const char* _InternalParse(const char* begin, const char* end, void* object, ::google::protobuf::internal::ParseContext* ctx); + ::google::protobuf::internal::ParseFunc _ParseFunc() const final { return _InternalParse; } + #else + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const final; + ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DistributedMPITrainingTask* other); + private: + inline ::google::protobuf::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::google::protobuf::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // int32 num_workers = 1; + void clear_num_workers(); + static const int kNumWorkersFieldNumber = 1; + ::google::protobuf::int32 num_workers() const; + void set_num_workers(::google::protobuf::int32 value); + + // int32 num_launcher_replicas = 2; + void clear_num_launcher_replicas(); + static const int kNumLauncherReplicasFieldNumber = 2; + ::google::protobuf::int32 num_launcher_replicas() const; + void set_num_launcher_replicas(::google::protobuf::int32 value); + + // int32 slots = 3; + void clear_slots(); + static const int kSlotsFieldNumber = 3; + ::google::protobuf::int32 slots() const; + void set_slots(::google::protobuf::int32 value); + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedMPITrainingTask) + private: + class HasBitSetters; + + ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; + ::google::protobuf::int32 num_workers_; + ::google::protobuf::int32 num_launcher_replicas_; + ::google::protobuf::int32 slots_; + mutable ::google::protobuf::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flyteidl_2fplugins_2fmpi_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// DistributedMPITrainingTask + +// int32 num_workers = 1; +inline void DistributedMPITrainingTask::clear_num_workers() { + num_workers_ = 0; +} +inline ::google::protobuf::int32 DistributedMPITrainingTask::num_workers() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedMPITrainingTask.num_workers) + return num_workers_; +} +inline void DistributedMPITrainingTask::set_num_workers(::google::protobuf::int32 value) { + + num_workers_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedMPITrainingTask.num_workers) +} + +// int32 num_launcher_replicas = 2; +inline void 
DistributedMPITrainingTask::clear_num_launcher_replicas() { + num_launcher_replicas_ = 0; +} +inline ::google::protobuf::int32 DistributedMPITrainingTask::num_launcher_replicas() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedMPITrainingTask.num_launcher_replicas) + return num_launcher_replicas_; +} +inline void DistributedMPITrainingTask::set_num_launcher_replicas(::google::protobuf::int32 value) { + + num_launcher_replicas_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedMPITrainingTask.num_launcher_replicas) +} + +// int32 slots = 3; +inline void DistributedMPITrainingTask::clear_slots() { + slots_ = 0; +} +inline ::google::protobuf::int32 DistributedMPITrainingTask::slots() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedMPITrainingTask.slots) + return slots_; +} +inline void DistributedMPITrainingTask::set_slots(::google::protobuf::int32 value) { + + slots_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedMPITrainingTask.slots) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace plugins +} // namespace flyteidl + +// @@protoc_insertion_point(global_scope) + +#include +#endif // PROTOBUF_INCLUDED_flyteidl_2fplugins_2fmpi_2eproto diff --git a/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.cc b/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.cc new file mode 100644 index 000000000..e626cd085 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.cc @@ -0,0 +1,24 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flyteidl/plugins/pytorch.proto + +#include "flyteidl/plugins/pytorch.pb.h" +#include "flyteidl/plugins/pytorch.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flyteidl { +namespace plugins { + +} // namespace flyteidl +} // namespace plugins + diff --git a/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.h b/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.h new file mode 100644 index 000000000..8345dd3c6 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/pytorch.grpc.pb.h @@ -0,0 +1,47 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flyteidl/plugins/pytorch.proto +#ifndef GRPC_flyteidl_2fplugins_2fpytorch_2eproto__INCLUDED +#define GRPC_flyteidl_2fplugins_2fpytorch_2eproto__INCLUDED + +#include "flyteidl/plugins/pytorch.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace grpc_impl { +class Channel; +class CompletionQueue; +class ServerCompletionQueue; +} // namespace grpc_impl + +namespace grpc { +namespace experimental { +template +class MessageAllocator; +} // namespace experimental +} // namespace grpc_impl + +namespace grpc { +class ServerContext; +} // namespace grpc + +namespace flyteidl { +namespace plugins { + +} // namespace plugins +} // namespace flyteidl + + +#endif // GRPC_flyteidl_2fplugins_2fpytorch_2eproto__INCLUDED diff --git a/gen/pb-cpp/flyteidl/plugins/pytorch.pb.cc b/gen/pb-cpp/flyteidl/plugins/pytorch.pb.cc new file mode 100644 index 000000000..9b1951b38 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/pytorch.pb.cc @@ -0,0 +1,368 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: flyteidl/plugins/pytorch.proto + +#include "flyteidl/plugins/pytorch.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +namespace flyteidl { +namespace plugins { +class DistributedPyTorchTrainingTaskDefaultTypeInternal { + public: + ::google::protobuf::internal::ExplicitlyConstructed _instance; +} _DistributedPyTorchTrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +static void InitDefaultsDistributedPyTorchTrainingTask_flyteidl_2fplugins_2fpytorch_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::flyteidl::plugins::_DistributedPyTorchTrainingTask_default_instance_; + new (ptr) ::flyteidl::plugins::DistributedPyTorchTrainingTask(); + ::google::protobuf::internal::OnShutdownDestroyMessage(ptr); + } + ::flyteidl::plugins::DistributedPyTorchTrainingTask::InitAsDefaultInstance(); +} + +::google::protobuf::internal::SCCInfo<0> scc_info_DistributedPyTorchTrainingTask_flyteidl_2fplugins_2fpytorch_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedPyTorchTrainingTask_flyteidl_2fplugins_2fpytorch_2eproto}, {}}; + +void InitDefaults_flyteidl_2fplugins_2fpytorch_2eproto() { + ::google::protobuf::internal::InitSCC(&scc_info_DistributedPyTorchTrainingTask_flyteidl_2fplugins_2fpytorch_2eproto.base); +} + +::google::protobuf::Metadata file_level_metadata_flyteidl_2fplugins_2fpytorch_2eproto[1]; +constexpr ::google::protobuf::EnumDescriptor const** file_level_enum_descriptors_flyteidl_2fplugins_2fpytorch_2eproto = nullptr; +constexpr ::google::protobuf::ServiceDescriptor const** file_level_service_descriptors_flyteidl_2fplugins_2fpytorch_2eproto = nullptr; + +const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2fpytorch_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedPyTorchTrainingTask, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedPyTorchTrainingTask, workers_), +}; +static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, sizeof(::flyteidl::plugins::DistributedPyTorchTrainingTask)}, +}; + +static ::google::protobuf::Message const * const file_default_instances[] = { + reinterpret_cast(&::flyteidl::plugins::_DistributedPyTorchTrainingTask_default_instance_), +}; + +::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_flyteidl_2fplugins_2fpytorch_2eproto = { + {}, AddDescriptors_flyteidl_2fplugins_2fpytorch_2eproto, "flyteidl/plugins/pytorch.proto", schemas, + file_default_instances, TableStruct_flyteidl_2fplugins_2fpytorch_2eproto::offsets, + file_level_metadata_flyteidl_2fplugins_2fpytorch_2eproto, 1, file_level_enum_descriptors_flyteidl_2fplugins_2fpytorch_2eproto, file_level_service_descriptors_flyteidl_2fplugins_2fpytorch_2eproto, +}; + +const char descriptor_table_protodef_flyteidl_2fplugins_2fpytorch_2eproto[] = + "\n\036flyteidl/plugins/pytorch.proto\022\020flytei" + "dl.plugins\"1\n\036DistributedPyTorchTraining" + "Task\022\017\n\007workers\030\001 \001(\005B9Z7github.com/flyt" + "eorg/flyteidl/gen/pb-go/flyteidl/plugins" + "b\006proto3" + ; +::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2fpytorch_2eproto = { + false, 
InitDefaults_flyteidl_2fplugins_2fpytorch_2eproto, + descriptor_table_protodef_flyteidl_2fplugins_2fpytorch_2eproto, + "flyteidl/plugins/pytorch.proto", &assign_descriptors_table_flyteidl_2fplugins_2fpytorch_2eproto, 168, +}; + +void AddDescriptors_flyteidl_2fplugins_2fpytorch_2eproto() { + static constexpr ::google::protobuf::internal::InitFunc deps[1] = + { + }; + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2fpytorch_2eproto, deps, 0); +} + +// Force running AddDescriptors() at dynamic initialization time. +static bool dynamic_init_dummy_flyteidl_2fplugins_2fpytorch_2eproto = []() { AddDescriptors_flyteidl_2fplugins_2fpytorch_2eproto(); return true; }(); +namespace flyteidl { +namespace plugins { + +// =================================================================== + +void DistributedPyTorchTrainingTask::InitAsDefaultInstance() { +} +class DistributedPyTorchTrainingTask::HasBitSetters { + public: +}; + +#if !defined(_MSC_VER) || _MSC_VER >= 1900 +const int DistributedPyTorchTrainingTask::kWorkersFieldNumber; +#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 + +DistributedPyTorchTrainingTask::DistributedPyTorchTrainingTask() + : ::google::protobuf::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:flyteidl.plugins.DistributedPyTorchTrainingTask) +} +DistributedPyTorchTrainingTask::DistributedPyTorchTrainingTask(const DistributedPyTorchTrainingTask& from) + : ::google::protobuf::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + workers_ = from.workers_; + // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.DistributedPyTorchTrainingTask) +} + +void DistributedPyTorchTrainingTask::SharedCtor() { + workers_ = 0; +} + +DistributedPyTorchTrainingTask::~DistributedPyTorchTrainingTask() { + // @@protoc_insertion_point(destructor:flyteidl.plugins.DistributedPyTorchTrainingTask) + SharedDtor(); +} + +void DistributedPyTorchTrainingTask::SharedDtor() { +} + +void DistributedPyTorchTrainingTask::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const DistributedPyTorchTrainingTask& DistributedPyTorchTrainingTask::default_instance() { + ::google::protobuf::internal::InitSCC(&::scc_info_DistributedPyTorchTrainingTask_flyteidl_2fplugins_2fpytorch_2eproto.base); + return *internal_default_instance(); +} + + +void DistributedPyTorchTrainingTask::Clear() { +// @@protoc_insertion_point(message_clear_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + workers_ = 0; + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* DistributedPyTorchTrainingTask::_InternalParse(const char* begin, const char* end, void* object, + ::google::protobuf::internal::ParseContext* ctx) { + auto msg = static_cast(object); + ::google::protobuf::int32 size; (void)size; + int depth; (void)depth; + ::google::protobuf::uint32 tag; + ::google::protobuf::internal::ParseFunc parser_till_end; (void)parser_till_end; + auto ptr = begin; + while (ptr < end) { + ptr = ::google::protobuf::io::Parse32(ptr, &tag); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + switch (tag >> 3) { + // int32 workers = 1; + case 1: { + if (static_cast<::google::protobuf::uint8>(tag) != 8) goto handle_unusual; + msg->set_workers(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + 
break; + } + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->EndGroup(tag); + return ptr; + } + auto res = UnknownFieldParse(tag, {_InternalParse, msg}, + ptr, end, msg->_internal_metadata_.mutable_unknown_fields(), ctx); + ptr = res.first; + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr); + if (res.second) return ptr; + } + } // switch + } // while + return ptr; +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool DistributedPyTorchTrainingTask::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + for (;;) { + ::std::pair<::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // int32 workers = 1; + case 1: { + if (static_cast< ::google::protobuf::uint8>(tag) == (8 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &workers_))); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:flyteidl.plugins.DistributedPyTorchTrainingTask) + return true; +failure: + // @@protoc_insertion_point(parse_failure:flyteidl.plugins.DistributedPyTorchTrainingTask) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void DistributedPyTorchTrainingTask::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->workers(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:flyteidl.plugins.DistributedPyTorchTrainingTask) +} + +::google::protobuf::uint8* DistributedPyTorchTrainingTask::InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->workers(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:flyteidl.plugins.DistributedPyTorchTrainingTask) + return target; +} + +size_t DistributedPyTorchTrainingTask::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + 
size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->workers()); + } + + int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void DistributedPyTorchTrainingTask::MergeFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + GOOGLE_DCHECK_NE(&from, this); + const DistributedPyTorchTrainingTask* source = + ::google::protobuf::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:flyteidl.plugins.DistributedPyTorchTrainingTask) + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:flyteidl.plugins.DistributedPyTorchTrainingTask) + MergeFrom(*source); + } +} + +void DistributedPyTorchTrainingTask::MergeFrom(const DistributedPyTorchTrainingTask& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.workers() != 0) { + set_workers(from.workers()); + } +} + +void DistributedPyTorchTrainingTask::CopyFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void DistributedPyTorchTrainingTask::CopyFrom(const DistributedPyTorchTrainingTask& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flyteidl.plugins.DistributedPyTorchTrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool DistributedPyTorchTrainingTask::IsInitialized() const { + return true; +} + +void DistributedPyTorchTrainingTask::Swap(DistributedPyTorchTrainingTask* other) { + if (other == this) return; + InternalSwap(other); +} +void DistributedPyTorchTrainingTask::InternalSwap(DistributedPyTorchTrainingTask* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + swap(workers_, other->workers_); +} + +::google::protobuf::Metadata DistributedPyTorchTrainingTask::GetMetadata() const { + ::google::protobuf::internal::AssignDescriptors(&::assign_descriptors_table_flyteidl_2fplugins_2fpytorch_2eproto); + return ::file_level_metadata_flyteidl_2fplugins_2fpytorch_2eproto[kIndexInFileMessages]; +} + + +// @@protoc_insertion_point(namespace_scope) +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> PROTOBUF_NOINLINE ::flyteidl::plugins::DistributedPyTorchTrainingTask* Arena::CreateMaybeMessage< ::flyteidl::plugins::DistributedPyTorchTrainingTask >(Arena* arena) { + return Arena::CreateInternal< ::flyteidl::plugins::DistributedPyTorchTrainingTask >(arena); +} +} // namespace protobuf +} // namespace google + +// @@protoc_insertion_point(global_scope) +#include 
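Editorial note (not part of the patch): the legacy, non-kubeflow messages being re-added above can be read back from the descriptor strings embedded in mpi.pb.cc and pytorch.pb.cc. The sketch below shows the corresponding proto3 definitions for orientation only; the field names, numbers, package, and go_package option come directly from those descriptors, while the syntax line, ordering, and lack of comments are assumptions — defer to protos/flyteidl/plugins/mpi.proto and protos/flyteidl/plugins/pytorch.proto in this patch for the authoritative definitions.

    // flyteidl/plugins/mpi.proto (reconstructed sketch from the embedded descriptor)
    syntax = "proto3";
    package flyteidl.plugins;
    option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins";

    message DistributedMPITrainingTask {
        int32 num_workers = 1;           // field name/number from descriptor
        int32 num_launcher_replicas = 2; // field name/number from descriptor
        int32 slots = 3;                 // field name/number from descriptor
    }

    // flyteidl/plugins/pytorch.proto (reconstructed sketch from the embedded descriptor)
    syntax = "proto3";
    package flyteidl.plugins;
    option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins";

    message DistributedPyTorchTrainingTask {
        int32 workers = 1; // field name/number from descriptor
    }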
diff --git a/gen/pb-cpp/flyteidl/plugins/pytorch.pb.h b/gen/pb-cpp/flyteidl/plugins/pytorch.pb.h new file mode 100644 index 000000000..c546d0f48 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/pytorch.pb.h @@ -0,0 +1,215 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/pytorch.proto + +#ifndef PROTOBUF_INCLUDED_flyteidl_2fplugins_2fpytorch_2eproto +#define PROTOBUF_INCLUDED_flyteidl_2fplugins_2fpytorch_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3007000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3007000 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2fpytorch_2eproto + +// Internal implementation detail -- do not use these members. +struct TableStruct_flyteidl_2fplugins_2fpytorch_2eproto { + static const ::google::protobuf::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::AuxillaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::ParseTable schema[1] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::FieldMetadata field_metadata[]; + static const ::google::protobuf::internal::SerializationTable serialization_table[]; + static const ::google::protobuf::uint32 offsets[]; +}; +void AddDescriptors_flyteidl_2fplugins_2fpytorch_2eproto(); +namespace flyteidl { +namespace plugins { +class DistributedPyTorchTrainingTask; +class DistributedPyTorchTrainingTaskDefaultTypeInternal; +extern DistributedPyTorchTrainingTaskDefaultTypeInternal _DistributedPyTorchTrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> ::flyteidl::plugins::DistributedPyTorchTrainingTask* Arena::CreateMaybeMessage<::flyteidl::plugins::DistributedPyTorchTrainingTask>(Arena*); +} // namespace protobuf +} // namespace google +namespace flyteidl { +namespace plugins { + +// =================================================================== + +class DistributedPyTorchTrainingTask final : + public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:flyteidl.plugins.DistributedPyTorchTrainingTask) */ { + public: + DistributedPyTorchTrainingTask(); + virtual ~DistributedPyTorchTrainingTask(); + + DistributedPyTorchTrainingTask(const DistributedPyTorchTrainingTask& from); + + inline DistributedPyTorchTrainingTask& operator=(const DistributedPyTorchTrainingTask& from) { + CopyFrom(from); + return *this; + } + #if LANG_CXX11 + DistributedPyTorchTrainingTask(DistributedPyTorchTrainingTask&& from) noexcept + : DistributedPyTorchTrainingTask() { + *this = ::std::move(from); + } + + inline DistributedPyTorchTrainingTask& operator=(DistributedPyTorchTrainingTask&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + 
} + #endif + static const ::google::protobuf::Descriptor* descriptor() { + return default_instance().GetDescriptor(); + } + static const DistributedPyTorchTrainingTask& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const DistributedPyTorchTrainingTask* internal_default_instance() { + return reinterpret_cast( + &_DistributedPyTorchTrainingTask_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + void Swap(DistributedPyTorchTrainingTask* other); + friend void swap(DistributedPyTorchTrainingTask& a, DistributedPyTorchTrainingTask& b) { + a.Swap(&b); + } + + // implements Message ---------------------------------------------- + + inline DistributedPyTorchTrainingTask* New() const final { + return CreateMaybeMessage(nullptr); + } + + DistributedPyTorchTrainingTask* New(::google::protobuf::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::google::protobuf::Message& from) final; + void MergeFrom(const ::google::protobuf::Message& from) final; + void CopyFrom(const DistributedPyTorchTrainingTask& from); + void MergeFrom(const DistributedPyTorchTrainingTask& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + static const char* _InternalParse(const char* begin, const char* end, void* object, ::google::protobuf::internal::ParseContext* ctx); + ::google::protobuf::internal::ParseFunc _ParseFunc() const final { return _InternalParse; } + #else + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const final; + ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DistributedPyTorchTrainingTask* other); + private: + inline ::google::protobuf::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::google::protobuf::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // int32 workers = 1; + void clear_workers(); + static const int kWorkersFieldNumber = 1; + ::google::protobuf::int32 workers() const; + void set_workers(::google::protobuf::int32 value); + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedPyTorchTrainingTask) + private: + class HasBitSetters; + + ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; + ::google::protobuf::int32 workers_; + mutable ::google::protobuf::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flyteidl_2fplugins_2fpytorch_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// DistributedPyTorchTrainingTask + +// int32 workers = 1; +inline void DistributedPyTorchTrainingTask::clear_workers() { + 
workers_ = 0; +} +inline ::google::protobuf::int32 DistributedPyTorchTrainingTask::workers() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedPyTorchTrainingTask.workers) + return workers_; +} +inline void DistributedPyTorchTrainingTask::set_workers(::google::protobuf::int32 value) { + + workers_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedPyTorchTrainingTask.workers) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace plugins +} // namespace flyteidl + +// @@protoc_insertion_point(global_scope) + +#include +#endif // PROTOBUF_INCLUDED_flyteidl_2fplugins_2fpytorch_2eproto diff --git a/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.cc b/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.cc new file mode 100644 index 000000000..f3a3c5622 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.cc @@ -0,0 +1,24 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flyteidl/plugins/tensorflow.proto + +#include "flyteidl/plugins/tensorflow.pb.h" +#include "flyteidl/plugins/tensorflow.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flyteidl { +namespace plugins { + +} // namespace flyteidl +} // namespace plugins + diff --git a/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.h b/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.h new file mode 100644 index 000000000..1bc80de44 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/tensorflow.grpc.pb.h @@ -0,0 +1,47 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flyteidl/plugins/tensorflow.proto +#ifndef GRPC_flyteidl_2fplugins_2ftensorflow_2eproto__INCLUDED +#define GRPC_flyteidl_2fplugins_2ftensorflow_2eproto__INCLUDED + +#include "flyteidl/plugins/tensorflow.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace grpc_impl { +class Channel; +class CompletionQueue; +class ServerCompletionQueue; +} // namespace grpc_impl + +namespace grpc { +namespace experimental { +template +class MessageAllocator; +} // namespace experimental +} // namespace grpc_impl + +namespace grpc { +class ServerContext; +} // namespace grpc + +namespace flyteidl { +namespace plugins { + +} // namespace plugins +} // namespace flyteidl + + +#endif // GRPC_flyteidl_2fplugins_2ftensorflow_2eproto__INCLUDED diff --git a/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.cc b/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.cc new file mode 100644 index 000000000..9dae11c1a --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.cc @@ -0,0 +1,461 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: flyteidl/plugins/tensorflow.proto + +#include "flyteidl/plugins/tensorflow.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +namespace flyteidl { +namespace plugins { +class DistributedTensorflowTrainingTaskDefaultTypeInternal { + public: + ::google::protobuf::internal::ExplicitlyConstructed _instance; +} _DistributedTensorflowTrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +static void InitDefaultsDistributedTensorflowTrainingTask_flyteidl_2fplugins_2ftensorflow_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::flyteidl::plugins::_DistributedTensorflowTrainingTask_default_instance_; + new (ptr) ::flyteidl::plugins::DistributedTensorflowTrainingTask(); + ::google::protobuf::internal::OnShutdownDestroyMessage(ptr); + } + ::flyteidl::plugins::DistributedTensorflowTrainingTask::InitAsDefaultInstance(); +} + +::google::protobuf::internal::SCCInfo<0> scc_info_DistributedTensorflowTrainingTask_flyteidl_2fplugins_2ftensorflow_2eproto = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsDistributedTensorflowTrainingTask_flyteidl_2fplugins_2ftensorflow_2eproto}, {}}; + +void InitDefaults_flyteidl_2fplugins_2ftensorflow_2eproto() { + ::google::protobuf::internal::InitSCC(&scc_info_DistributedTensorflowTrainingTask_flyteidl_2fplugins_2ftensorflow_2eproto.base); +} + +::google::protobuf::Metadata file_level_metadata_flyteidl_2fplugins_2ftensorflow_2eproto[1]; +constexpr ::google::protobuf::EnumDescriptor const** file_level_enum_descriptors_flyteidl_2fplugins_2ftensorflow_2eproto = nullptr; +constexpr ::google::protobuf::ServiceDescriptor const** file_level_service_descriptors_flyteidl_2fplugins_2ftensorflow_2eproto = nullptr; + +const ::google::protobuf::uint32 TableStruct_flyteidl_2fplugins_2ftensorflow_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedTensorflowTrainingTask, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedTensorflowTrainingTask, workers_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedTensorflowTrainingTask, ps_replicas_), + PROTOBUF_FIELD_OFFSET(::flyteidl::plugins::DistributedTensorflowTrainingTask, chief_replicas_), +}; +static const ::google::protobuf::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, sizeof(::flyteidl::plugins::DistributedTensorflowTrainingTask)}, +}; + +static ::google::protobuf::Message const * const file_default_instances[] = { + reinterpret_cast(&::flyteidl::plugins::_DistributedTensorflowTrainingTask_default_instance_), +}; + +::google::protobuf::internal::AssignDescriptorsTable assign_descriptors_table_flyteidl_2fplugins_2ftensorflow_2eproto = { + {}, AddDescriptors_flyteidl_2fplugins_2ftensorflow_2eproto, "flyteidl/plugins/tensorflow.proto", schemas, + file_default_instances, TableStruct_flyteidl_2fplugins_2ftensorflow_2eproto::offsets, + file_level_metadata_flyteidl_2fplugins_2ftensorflow_2eproto, 1, file_level_enum_descriptors_flyteidl_2fplugins_2ftensorflow_2eproto, file_level_service_descriptors_flyteidl_2fplugins_2ftensorflow_2eproto, +}; + +const char descriptor_table_protodef_flyteidl_2fplugins_2ftensorflow_2eproto[] = + "\n!flyteidl/plugins/tensorflow.proto\022\020fly" + 
"teidl.plugins\"a\n!DistributedTensorflowTr" + "ainingTask\022\017\n\007workers\030\001 \001(\005\022\023\n\013ps_replic" + "as\030\002 \001(\005\022\026\n\016chief_replicas\030\003 \001(\005B9Z7gith" + "ub.com/flyteorg/flyteidl/gen/pb-go/flyte" + "idl/pluginsb\006proto3" + ; +::google::protobuf::internal::DescriptorTable descriptor_table_flyteidl_2fplugins_2ftensorflow_2eproto = { + false, InitDefaults_flyteidl_2fplugins_2ftensorflow_2eproto, + descriptor_table_protodef_flyteidl_2fplugins_2ftensorflow_2eproto, + "flyteidl/plugins/tensorflow.proto", &assign_descriptors_table_flyteidl_2fplugins_2ftensorflow_2eproto, 219, +}; + +void AddDescriptors_flyteidl_2fplugins_2ftensorflow_2eproto() { + static constexpr ::google::protobuf::internal::InitFunc deps[1] = + { + }; + ::google::protobuf::internal::AddDescriptors(&descriptor_table_flyteidl_2fplugins_2ftensorflow_2eproto, deps, 0); +} + +// Force running AddDescriptors() at dynamic initialization time. +static bool dynamic_init_dummy_flyteidl_2fplugins_2ftensorflow_2eproto = []() { AddDescriptors_flyteidl_2fplugins_2ftensorflow_2eproto(); return true; }(); +namespace flyteidl { +namespace plugins { + +// =================================================================== + +void DistributedTensorflowTrainingTask::InitAsDefaultInstance() { +} +class DistributedTensorflowTrainingTask::HasBitSetters { + public: +}; + +#if !defined(_MSC_VER) || _MSC_VER >= 1900 +const int DistributedTensorflowTrainingTask::kWorkersFieldNumber; +const int DistributedTensorflowTrainingTask::kPsReplicasFieldNumber; +const int DistributedTensorflowTrainingTask::kChiefReplicasFieldNumber; +#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 + +DistributedTensorflowTrainingTask::DistributedTensorflowTrainingTask() + : ::google::protobuf::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:flyteidl.plugins.DistributedTensorflowTrainingTask) +} +DistributedTensorflowTrainingTask::DistributedTensorflowTrainingTask(const DistributedTensorflowTrainingTask& from) + : ::google::protobuf::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::memcpy(&workers_, &from.workers_, + static_cast(reinterpret_cast(&chief_replicas_) - + reinterpret_cast(&workers_)) + sizeof(chief_replicas_)); + // @@protoc_insertion_point(copy_constructor:flyteidl.plugins.DistributedTensorflowTrainingTask) +} + +void DistributedTensorflowTrainingTask::SharedCtor() { + ::memset(&workers_, 0, static_cast( + reinterpret_cast(&chief_replicas_) - + reinterpret_cast(&workers_)) + sizeof(chief_replicas_)); +} + +DistributedTensorflowTrainingTask::~DistributedTensorflowTrainingTask() { + // @@protoc_insertion_point(destructor:flyteidl.plugins.DistributedTensorflowTrainingTask) + SharedDtor(); +} + +void DistributedTensorflowTrainingTask::SharedDtor() { +} + +void DistributedTensorflowTrainingTask::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const DistributedTensorflowTrainingTask& DistributedTensorflowTrainingTask::default_instance() { + ::google::protobuf::internal::InitSCC(&::scc_info_DistributedTensorflowTrainingTask_flyteidl_2fplugins_2ftensorflow_2eproto.base); + return *internal_default_instance(); +} + + +void DistributedTensorflowTrainingTask::Clear() { +// @@protoc_insertion_point(message_clear_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) 
cached_has_bits; + + ::memset(&workers_, 0, static_cast( + reinterpret_cast(&chief_replicas_) - + reinterpret_cast(&workers_)) + sizeof(chief_replicas_)); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* DistributedTensorflowTrainingTask::_InternalParse(const char* begin, const char* end, void* object, + ::google::protobuf::internal::ParseContext* ctx) { + auto msg = static_cast(object); + ::google::protobuf::int32 size; (void)size; + int depth; (void)depth; + ::google::protobuf::uint32 tag; + ::google::protobuf::internal::ParseFunc parser_till_end; (void)parser_till_end; + auto ptr = begin; + while (ptr < end) { + ptr = ::google::protobuf::io::Parse32(ptr, &tag); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + switch (tag >> 3) { + // int32 workers = 1; + case 1: { + if (static_cast<::google::protobuf::uint8>(tag) != 8) goto handle_unusual; + msg->set_workers(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + // int32 ps_replicas = 2; + case 2: { + if (static_cast<::google::protobuf::uint8>(tag) != 16) goto handle_unusual; + msg->set_ps_replicas(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + // int32 chief_replicas = 3; + case 3: { + if (static_cast<::google::protobuf::uint8>(tag) != 24) goto handle_unusual; + msg->set_chief_replicas(::google::protobuf::internal::ReadVarint(&ptr)); + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr); + break; + } + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->EndGroup(tag); + return ptr; + } + auto res = UnknownFieldParse(tag, {_InternalParse, msg}, + ptr, end, msg->_internal_metadata_.mutable_unknown_fields(), ctx); + ptr = res.first; + GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr); + if (res.second) return ptr; + } + } // switch + } // while + return ptr; +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool DistributedTensorflowTrainingTask::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + for (;;) { + ::std::pair<::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // int32 workers = 1; + case 1: { + if (static_cast< ::google::protobuf::uint8>(tag) == (8 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &workers_))); + } else { + goto handle_unusual; + } + break; + } + + // int32 ps_replicas = 2; + case 2: { + if (static_cast< ::google::protobuf::uint8>(tag) == (16 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &ps_replicas_))); + } else { + goto handle_unusual; + } + break; + } + + // int32 chief_replicas = 3; + case 3: { + if (static_cast< ::google::protobuf::uint8>(tag) == (24 & 0xFF)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &chief_replicas_))); + } else { + goto handle_unusual; + } + break; + } + + 
default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:flyteidl.plugins.DistributedTensorflowTrainingTask) + return true; +failure: + // @@protoc_insertion_point(parse_failure:flyteidl.plugins.DistributedTensorflowTrainingTask) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void DistributedTensorflowTrainingTask::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->workers(), output); + } + + // int32 ps_replicas = 2; + if (this->ps_replicas() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->ps_replicas(), output); + } + + // int32 chief_replicas = 3; + if (this->chief_replicas() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->chief_replicas(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:flyteidl.plugins.DistributedTensorflowTrainingTask) +} + +::google::protobuf::uint8* DistributedTensorflowTrainingTask::InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->workers(), target); + } + + // int32 ps_replicas = 2; + if (this->ps_replicas() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(2, this->ps_replicas(), target); + } + + // int32 chief_replicas = 3; + if (this->chief_replicas() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(3, this->chief_replicas(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:flyteidl.plugins.DistributedTensorflowTrainingTask) + return target; +} + +size_t DistributedTensorflowTrainingTask::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // int32 workers = 1; + if (this->workers() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->workers()); + } + + // int32 ps_replicas = 2; + if (this->ps_replicas() != 0) { + total_size += 1 + + 
::google::protobuf::internal::WireFormatLite::Int32Size( + this->ps_replicas()); + } + + // int32 chief_replicas = 3; + if (this->chief_replicas() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->chief_replicas()); + } + + int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void DistributedTensorflowTrainingTask::MergeFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + GOOGLE_DCHECK_NE(&from, this); + const DistributedTensorflowTrainingTask* source = + ::google::protobuf::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:flyteidl.plugins.DistributedTensorflowTrainingTask) + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:flyteidl.plugins.DistributedTensorflowTrainingTask) + MergeFrom(*source); + } +} + +void DistributedTensorflowTrainingTask::MergeFrom(const DistributedTensorflowTrainingTask& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.workers() != 0) { + set_workers(from.workers()); + } + if (from.ps_replicas() != 0) { + set_ps_replicas(from.ps_replicas()); + } + if (from.chief_replicas() != 0) { + set_chief_replicas(from.chief_replicas()); + } +} + +void DistributedTensorflowTrainingTask::CopyFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void DistributedTensorflowTrainingTask::CopyFrom(const DistributedTensorflowTrainingTask& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flyteidl.plugins.DistributedTensorflowTrainingTask) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool DistributedTensorflowTrainingTask::IsInitialized() const { + return true; +} + +void DistributedTensorflowTrainingTask::Swap(DistributedTensorflowTrainingTask* other) { + if (other == this) return; + InternalSwap(other); +} +void DistributedTensorflowTrainingTask::InternalSwap(DistributedTensorflowTrainingTask* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + swap(workers_, other->workers_); + swap(ps_replicas_, other->ps_replicas_); + swap(chief_replicas_, other->chief_replicas_); +} + +::google::protobuf::Metadata DistributedTensorflowTrainingTask::GetMetadata() const { + ::google::protobuf::internal::AssignDescriptors(&::assign_descriptors_table_flyteidl_2fplugins_2ftensorflow_2eproto); + return ::file_level_metadata_flyteidl_2fplugins_2ftensorflow_2eproto[kIndexInFileMessages]; +} + + +// @@protoc_insertion_point(namespace_scope) +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> PROTOBUF_NOINLINE ::flyteidl::plugins::DistributedTensorflowTrainingTask* Arena::CreateMaybeMessage< ::flyteidl::plugins::DistributedTensorflowTrainingTask >(Arena* arena) { + return Arena::CreateInternal< ::flyteidl::plugins::DistributedTensorflowTrainingTask >(arena); +} +} // namespace 
protobuf +} // namespace google + +// @@protoc_insertion_point(global_scope) +#include diff --git a/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.h b/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.h new file mode 100644 index 000000000..613ed31d8 --- /dev/null +++ b/gen/pb-cpp/flyteidl/plugins/tensorflow.pb.h @@ -0,0 +1,257 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/tensorflow.proto + +#ifndef PROTOBUF_INCLUDED_flyteidl_2fplugins_2ftensorflow_2eproto +#define PROTOBUF_INCLUDED_flyteidl_2fplugins_2ftensorflow_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3007000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3007000 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flyteidl_2fplugins_2ftensorflow_2eproto + +// Internal implementation detail -- do not use these members. +struct TableStruct_flyteidl_2fplugins_2ftensorflow_2eproto { + static const ::google::protobuf::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::AuxillaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::ParseTable schema[1] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::google::protobuf::internal::FieldMetadata field_metadata[]; + static const ::google::protobuf::internal::SerializationTable serialization_table[]; + static const ::google::protobuf::uint32 offsets[]; +}; +void AddDescriptors_flyteidl_2fplugins_2ftensorflow_2eproto(); +namespace flyteidl { +namespace plugins { +class DistributedTensorflowTrainingTask; +class DistributedTensorflowTrainingTaskDefaultTypeInternal; +extern DistributedTensorflowTrainingTaskDefaultTypeInternal _DistributedTensorflowTrainingTask_default_instance_; +} // namespace plugins +} // namespace flyteidl +namespace google { +namespace protobuf { +template<> ::flyteidl::plugins::DistributedTensorflowTrainingTask* Arena::CreateMaybeMessage<::flyteidl::plugins::DistributedTensorflowTrainingTask>(Arena*); +} // namespace protobuf +} // namespace google +namespace flyteidl { +namespace plugins { + +// =================================================================== + +class DistributedTensorflowTrainingTask final : + public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:flyteidl.plugins.DistributedTensorflowTrainingTask) */ { + public: + DistributedTensorflowTrainingTask(); + virtual ~DistributedTensorflowTrainingTask(); + + DistributedTensorflowTrainingTask(const DistributedTensorflowTrainingTask& from); + + inline DistributedTensorflowTrainingTask& operator=(const DistributedTensorflowTrainingTask& from) { + CopyFrom(from); + return *this; + } + #if LANG_CXX11 + DistributedTensorflowTrainingTask(DistributedTensorflowTrainingTask&& from) noexcept + : DistributedTensorflowTrainingTask() { + *this = ::std::move(from); + } + + inline DistributedTensorflowTrainingTask& 
operator=(DistributedTensorflowTrainingTask&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + #endif + static const ::google::protobuf::Descriptor* descriptor() { + return default_instance().GetDescriptor(); + } + static const DistributedTensorflowTrainingTask& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const DistributedTensorflowTrainingTask* internal_default_instance() { + return reinterpret_cast( + &_DistributedTensorflowTrainingTask_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + void Swap(DistributedTensorflowTrainingTask* other); + friend void swap(DistributedTensorflowTrainingTask& a, DistributedTensorflowTrainingTask& b) { + a.Swap(&b); + } + + // implements Message ---------------------------------------------- + + inline DistributedTensorflowTrainingTask* New() const final { + return CreateMaybeMessage(nullptr); + } + + DistributedTensorflowTrainingTask* New(::google::protobuf::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::google::protobuf::Message& from) final; + void MergeFrom(const ::google::protobuf::Message& from) final; + void CopyFrom(const DistributedTensorflowTrainingTask& from); + void MergeFrom(const DistributedTensorflowTrainingTask& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + static const char* _InternalParse(const char* begin, const char* end, void* object, ::google::protobuf::internal::ParseContext* ctx); + ::google::protobuf::internal::ParseFunc _ParseFunc() const final { return _InternalParse; } + #else + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const final; + ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DistributedTensorflowTrainingTask* other); + private: + inline ::google::protobuf::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::google::protobuf::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // int32 workers = 1; + void clear_workers(); + static const int kWorkersFieldNumber = 1; + ::google::protobuf::int32 workers() const; + void set_workers(::google::protobuf::int32 value); + + // int32 ps_replicas = 2; + void clear_ps_replicas(); + static const int kPsReplicasFieldNumber = 2; + ::google::protobuf::int32 ps_replicas() const; + void set_ps_replicas(::google::protobuf::int32 value); + + // int32 chief_replicas = 3; + void clear_chief_replicas(); + static const int kChiefReplicasFieldNumber = 3; + ::google::protobuf::int32 chief_replicas() const; + void set_chief_replicas(::google::protobuf::int32 value); + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedTensorflowTrainingTask) + 
private: + class HasBitSetters; + + ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; + ::google::protobuf::int32 workers_; + ::google::protobuf::int32 ps_replicas_; + ::google::protobuf::int32 chief_replicas_; + mutable ::google::protobuf::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flyteidl_2fplugins_2ftensorflow_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// DistributedTensorflowTrainingTask + +// int32 workers = 1; +inline void DistributedTensorflowTrainingTask::clear_workers() { + workers_ = 0; +} +inline ::google::protobuf::int32 DistributedTensorflowTrainingTask::workers() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedTensorflowTrainingTask.workers) + return workers_; +} +inline void DistributedTensorflowTrainingTask::set_workers(::google::protobuf::int32 value) { + + workers_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedTensorflowTrainingTask.workers) +} + +// int32 ps_replicas = 2; +inline void DistributedTensorflowTrainingTask::clear_ps_replicas() { + ps_replicas_ = 0; +} +inline ::google::protobuf::int32 DistributedTensorflowTrainingTask::ps_replicas() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedTensorflowTrainingTask.ps_replicas) + return ps_replicas_; +} +inline void DistributedTensorflowTrainingTask::set_ps_replicas(::google::protobuf::int32 value) { + + ps_replicas_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedTensorflowTrainingTask.ps_replicas) +} + +// int32 chief_replicas = 3; +inline void DistributedTensorflowTrainingTask::clear_chief_replicas() { + chief_replicas_ = 0; +} +inline ::google::protobuf::int32 DistributedTensorflowTrainingTask::chief_replicas() const { + // @@protoc_insertion_point(field_get:flyteidl.plugins.DistributedTensorflowTrainingTask.chief_replicas) + return chief_replicas_; +} +inline void DistributedTensorflowTrainingTask::set_chief_replicas(::google::protobuf::int32 value) { + + chief_replicas_ = value; + // @@protoc_insertion_point(field_set:flyteidl.plugins.DistributedTensorflowTrainingTask.chief_replicas) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace plugins +} // namespace flyteidl + +// @@protoc_insertion_point(global_scope) + +#include +#endif // PROTOBUF_INCLUDED_flyteidl_2fplugins_2ftensorflow_2eproto diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/common.pb.go b/gen/pb-go/flyteidl/plugins/kubeflow/common.pb.go index 190276aac..0e8cf8ce1 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/common.pb.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/common.pb.go @@ -20,52 +20,27 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package -type SuccessPolicy int32 - -const ( - SuccessPolicy_SUCCESS_POLICY_DEFAULT SuccessPolicy = 0 - SuccessPolicy_SUCCESS_POLICY_ALL_WORKERS SuccessPolicy = 1 -) - -var SuccessPolicy_name = map[int32]string{ - 0: "SUCCESS_POLICY_DEFAULT", - 1: "SUCCESS_POLICY_ALL_WORKERS", -} - -var SuccessPolicy_value = map[string]int32{ - "SUCCESS_POLICY_DEFAULT": 0, - "SUCCESS_POLICY_ALL_WORKERS": 1, -} - -func (x SuccessPolicy) String() string { - return proto.EnumName(SuccessPolicy_name, int32(x)) -} - -func (SuccessPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f625aa9156a15090, []int{0} -} - type CleanPodPolicy int32 const ( - CleanPodPolicy_CLEANPOD_POLICY_UNDEFINED CleanPodPolicy = 0 - CleanPodPolicy_CLEANPOD_POLICY_ALL CleanPodPolicy = 1 - CleanPodPolicy_CLEANPOD_POLICY_RUNNING CleanPodPolicy = 2 - CleanPodPolicy_CLEANPOD_POLICY_NONE CleanPodPolicy = 3 + // The All policy means all pods even completed pods will be deleted immediately when the job finishes. + CleanPodPolicy_CLEANPOD_POLICY_ALL CleanPodPolicy = 0 + // The Running policy means that only pods still running when a job completes (e.g. parameter servers) will be deleted immediately; completed pods will not be deleted so that the logs will be preserved. This is the default value. + CleanPodPolicy_CLEANPOD_POLICY_RUNNING CleanPodPolicy = 1 + // The None policy means that no pods will be deleted when the job completes. + CleanPodPolicy_CLEANPOD_POLICY_NONE CleanPodPolicy = 2 ) var CleanPodPolicy_name = map[int32]string{ - 0: "CLEANPOD_POLICY_UNDEFINED", - 1: "CLEANPOD_POLICY_ALL", - 2: "CLEANPOD_POLICY_RUNNING", - 3: "CLEANPOD_POLICY_NONE", + 0: "CLEANPOD_POLICY_ALL", + 1: "CLEANPOD_POLICY_RUNNING", + 2: "CLEANPOD_POLICY_NONE", } var CleanPodPolicy_value = map[string]int32{ - "CLEANPOD_POLICY_UNDEFINED": 0, - "CLEANPOD_POLICY_ALL": 1, - "CLEANPOD_POLICY_RUNNING": 2, - "CLEANPOD_POLICY_NONE": 3, + "CLEANPOD_POLICY_ALL": 0, + "CLEANPOD_POLICY_RUNNING": 1, + "CLEANPOD_POLICY_NONE": 2, } func (x CleanPodPolicy) String() string { @@ -73,7 +48,7 @@ func (x CleanPodPolicy) String() string { } func (CleanPodPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f625aa9156a15090, []int{1} + return fileDescriptor_f625aa9156a15090, []int{0} } type RestartPolicy int32 @@ -109,15 +84,13 @@ func (x RestartPolicy) String() string { } func (RestartPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f625aa9156a15090, []int{2} + return fileDescriptor_f625aa9156a15090, []int{1} } type RunPolicy struct { - // CleanPodPolicy defines the policy to kill pods after the job completes. - // Default to None. + // CleanPodPolicy defines the policy to kill pods after the job completes. Default to None. CleanPodPolicy CleanPodPolicy `protobuf:"varint,1,opt,name=clean_pod_policy,json=cleanPodPolicy,proto3,enum=flyteidl.plugins.kubeflow.CleanPodPolicy" json:"clean_pod_policy,omitempty"` - // TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since - // reconcile gets called periodically. Default to infinite. + // TTL to clean up jobs. Default to infinite. TtlSecondsAfterFinished int32 `protobuf:"varint,2,opt,name=ttl_seconds_after_finished,json=ttlSecondsAfterFinished,proto3" json:"ttl_seconds_after_finished,omitempty"` // Specifies the duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer. 
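Note (illustrative, not part of the generated diff): the hunk above narrows RunPolicy to the three-value CleanPodPolicy enum and keeps the TTL/backoff fields. A minimal Go sketch of how a caller might populate the regenerated message follows; field, getter, and enum names are taken from the generated common.pb.go shown here, while the kubeflow import path is an assumption based on the gen/pb-go/flyteidl/plugins/kubeflow file locations in this patch.

package main

import (
	"fmt"

	// Import path assumed from the generated file locations in this patch.
	kubeflow "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins/kubeflow"
)

func main() {
	// Populate the regenerated RunPolicy; only fields whose names appear
	// in the generated code above are used here.
	rp := &kubeflow.RunPolicy{
		CleanPodPolicy:          kubeflow.CleanPodPolicy_CLEANPOD_POLICY_RUNNING,
		TtlSecondsAfterFinished: 3600,
		BackoffLimit:            3,
	}
	fmt.Println(rp.GetCleanPodPolicy(), rp.GetTtlSecondsAfterFinished(), rp.GetBackoffLimit())
}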
@@ -158,7 +131,7 @@ func (m *RunPolicy) GetCleanPodPolicy() CleanPodPolicy { if m != nil { return m.CleanPodPolicy } - return CleanPodPolicy_CLEANPOD_POLICY_UNDEFINED + return CleanPodPolicy_CLEANPOD_POLICY_ALL } func (m *RunPolicy) GetTtlSecondsAfterFinished() int32 { @@ -183,7 +156,6 @@ func (m *RunPolicy) GetBackoffLimit() int32 { } func init() { - proto.RegisterEnum("flyteidl.plugins.kubeflow.SuccessPolicy", SuccessPolicy_name, SuccessPolicy_value) proto.RegisterEnum("flyteidl.plugins.kubeflow.CleanPodPolicy", CleanPodPolicy_name, CleanPodPolicy_value) proto.RegisterEnum("flyteidl.plugins.kubeflow.RestartPolicy", RestartPolicy_name, RestartPolicy_value) proto.RegisterType((*RunPolicy)(nil), "flyteidl.plugins.kubeflow.RunPolicy") @@ -194,32 +166,29 @@ func init() { } var fileDescriptor_f625aa9156a15090 = []byte{ - // 430 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xed, 0x8b, 0xd3, 0x30, - 0x1c, 0xc7, 0xd7, 0x9d, 0x0a, 0x06, 0x37, 0x42, 0xf4, 0xdc, 0x83, 0x28, 0x87, 0x82, 0x9c, 0x03, - 0x5b, 0x50, 0x41, 0xc4, 0x57, 0x75, 0xcd, 0x64, 0x5c, 0x49, 0x47, 0xba, 0x7a, 0x9c, 0x6f, 0x42, - 0x9b, 0xa6, 0xbd, 0x70, 0x59, 0x53, 0xd6, 0x54, 0xb9, 0x57, 0xfe, 0xdb, 0xbe, 0x94, 0xf6, 0x3a, - 0xe5, 0x8a, 0xf7, 0x32, 0xbf, 0xcf, 0xe7, 0xf7, 0x40, 0xf8, 0x82, 0xd7, 0x99, 0xba, 0x36, 0x42, - 0xa6, 0xca, 0x29, 0x55, 0x9d, 0xcb, 0xa2, 0x72, 0xae, 0xea, 0x44, 0x64, 0x4a, 0xff, 0x74, 0xb8, - 0xde, 0xed, 0x74, 0x61, 0x97, 0x7b, 0x6d, 0x34, 0x9a, 0x1d, 0x3c, 0xbb, 0xf3, 0xec, 0x83, 0xf7, - 0xf2, 0xb7, 0x05, 0x1e, 0xd2, 0xba, 0xd8, 0x68, 0x25, 0xf9, 0x35, 0x0a, 0x01, 0xe4, 0x4a, 0xc4, - 0x05, 0x2b, 0x75, 0xca, 0xca, 0xb6, 0x36, 0xb5, 0x4e, 0xac, 0xd3, 0xf1, 0xbb, 0x37, 0xf6, 0x9d, - 0x33, 0xec, 0x65, 0xd3, 0xb2, 0xd1, 0xe9, 0xcd, 0x10, 0x3a, 0xe6, 0xb7, 0xde, 0xe8, 0x33, 0x98, - 0x1b, 0xa3, 0x58, 0x25, 0xb8, 0x2e, 0xd2, 0x8a, 0xc5, 0x99, 0x11, 0x7b, 0x96, 0xc9, 0x42, 0x56, - 0x97, 0x22, 0x9d, 0x0e, 0x4f, 0xac, 0xd3, 0xfb, 0x74, 0x62, 0x8c, 0x0a, 0x6f, 0x04, 0xb7, 0xe1, - 0xab, 0x0e, 0xa3, 0x0f, 0xe0, 0x38, 0xe6, 0x46, 0xfe, 0x10, 0x9e, 0x88, 0x53, 0x25, 0x0b, 0xd1, - 0x59, 0xd3, 0xa3, 0xb6, 0xef, 0xff, 0x10, 0xbd, 0x02, 0xa3, 0x24, 0xe6, 0x57, 0x3a, 0xcb, 0x98, - 0x92, 0x3b, 0x69, 0xa6, 0xf7, 0x5a, 0xfb, 0x51, 0x57, 0xf4, 0x9b, 0xda, 0xe2, 0x0c, 0x8c, 0xc2, - 0x9a, 0x73, 0x51, 0x55, 0xdd, 0xa1, 0x73, 0xf0, 0x34, 0x8c, 0x96, 0x4b, 0x1c, 0x86, 0x6c, 0x13, - 0xf8, 0xeb, 0xe5, 0x05, 0xf3, 0xf0, 0xca, 0x8d, 0xfc, 0x2d, 0x1c, 0xa0, 0x17, 0x60, 0xde, 0x63, - 0xae, 0xef, 0xb3, 0xf3, 0x80, 0x9e, 0x61, 0x1a, 0x42, 0x6b, 0xf1, 0x0b, 0x8c, 0x6f, 0x7f, 0x03, - 0x7a, 0x0e, 0x66, 0x4b, 0x1f, 0xbb, 0x64, 0x13, 0x78, 0x87, 0x96, 0x88, 0x78, 0x78, 0xb5, 0x26, - 0xd8, 0x83, 0x03, 0x34, 0x01, 0x8f, 0xfb, 0xd8, 0xf5, 0x7d, 0x68, 0xa1, 0x67, 0x60, 0xd2, 0x07, - 0x34, 0x22, 0x64, 0x4d, 0xbe, 0xc2, 0x21, 0x9a, 0x82, 0x27, 0x7d, 0x48, 0x02, 0x82, 0xe1, 0xd1, - 0x82, 0x83, 0x11, 0x15, 0x95, 0x89, 0xf7, 0xa6, 0xdb, 0x3f, 0x03, 0xc7, 0x14, 0x87, 0x5b, 0x97, - 0x6e, 0xff, 0xcd, 0x3f, 0x77, 0x2f, 0x42, 0x38, 0x68, 0x4e, 0xeb, 0xa1, 0x80, 0xb0, 0x95, 0xbb, - 0xf6, 0x23, 0x8a, 0xa1, 0xd5, 0x2c, 0xe9, 0x61, 0x82, 0xbf, 0x61, 0x0a, 0x87, 0x5f, 0x3e, 0x7d, - 0xff, 0x98, 0x4b, 0x73, 0x59, 0x27, 0x36, 0xd7, 0x3b, 0xa7, 0x4d, 0x84, 0xde, 0xe7, 0xce, 0xdf, - 0x18, 0xe6, 0xa2, 0x70, 0xca, 0xe4, 0x6d, 0xae, 0x9d, 0x7e, 0x32, 0x93, 0x07, 0x6d, 0x14, 0xdf, - 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x7e, 0xee, 0xb1, 0xb4, 0x02, 0x00, 0x00, + // 371 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0xcb, 0xd3, 0x30, + 0x14, 0x86, 0xbf, 0xce, 0x1f, 0x60, 0xf0, 0x1b, 0x25, 0x3a, 0xd6, 0x29, 0xc2, 0x50, 0x90, 0x39, + 0xb0, 0x05, 0x15, 0x44, 0xbc, 0xaa, 0x5b, 0x27, 0x83, 0x92, 0x96, 0x74, 0x53, 0xe6, 0x4d, 0x6c, + 0xd3, 0xb4, 0x0b, 0x4b, 0x93, 0xd2, 0xa6, 0xca, 0xfe, 0x73, 0x2f, 0x65, 0x5d, 0xa7, 0xac, 0xe8, + 0x65, 0xcf, 0xf3, 0x9c, 0xf7, 0x14, 0xf2, 0x82, 0x97, 0x99, 0x38, 0x6a, 0xc6, 0x53, 0xe1, 0x94, + 0xa2, 0xc9, 0xb9, 0xac, 0x9d, 0x43, 0x93, 0xb0, 0x4c, 0xa8, 0x9f, 0x0e, 0x55, 0x45, 0xa1, 0xa4, + 0x5d, 0x56, 0x4a, 0x2b, 0x38, 0xb9, 0x78, 0x76, 0xe7, 0xd9, 0x17, 0xef, 0xf9, 0x2f, 0x03, 0x3c, + 0xc0, 0x8d, 0x0c, 0x95, 0xe0, 0xf4, 0x08, 0x23, 0x60, 0x52, 0xc1, 0x62, 0x49, 0x4a, 0x95, 0x92, + 0xb2, 0x9d, 0x59, 0xc6, 0xd4, 0x98, 0x0d, 0xdf, 0xbc, 0xb2, 0xff, 0x9b, 0x61, 0x2f, 0x4e, 0x2b, + 0xa1, 0x4a, 0xcf, 0x21, 0x78, 0x48, 0xaf, 0xbe, 0xe1, 0x47, 0xf0, 0x44, 0x6b, 0x41, 0x6a, 0x46, + 0x95, 0x4c, 0x6b, 0x12, 0x67, 0x9a, 0x55, 0x24, 0xe3, 0x92, 0xd7, 0x7b, 0x96, 0x5a, 0x83, 0xa9, + 0x31, 0xbb, 0x87, 0xc7, 0x5a, 0x8b, 0xe8, 0x2c, 0xb8, 0x27, 0xbe, 0xea, 0x30, 0x7c, 0x07, 0x46, + 0x31, 0xd5, 0xfc, 0x07, 0x5b, 0xb2, 0x38, 0x15, 0x5c, 0xb2, 0xce, 0xb2, 0xee, 0xb4, 0x7b, 0xff, + 0x86, 0xf0, 0x05, 0xb8, 0x4d, 0x62, 0x7a, 0x50, 0x59, 0x46, 0x04, 0x2f, 0xb8, 0xb6, 0xee, 0xb6, + 0xf6, 0xc3, 0x6e, 0xe8, 0x9f, 0x66, 0xf3, 0xef, 0x60, 0x78, 0xfd, 0xe7, 0x70, 0x0c, 0x1e, 0x2d, + 0x7c, 0xcf, 0x45, 0x61, 0xb0, 0x24, 0x61, 0xe0, 0xaf, 0x17, 0x3b, 0xe2, 0xfa, 0xbe, 0x79, 0x03, + 0x9f, 0x82, 0x71, 0x1f, 0xe0, 0x2d, 0x42, 0x6b, 0xf4, 0xd9, 0x34, 0xa0, 0x05, 0x1e, 0xf7, 0x21, + 0x0a, 0x90, 0x67, 0x0e, 0xe6, 0x14, 0xdc, 0x62, 0x56, 0xeb, 0xb8, 0xd2, 0xdd, 0x81, 0x09, 0x18, + 0x61, 0x2f, 0xda, 0xb8, 0x78, 0xf3, 0x37, 0xff, 0xab, 0xbb, 0x8b, 0xcc, 0x1b, 0xf8, 0x0c, 0x4c, + 0x7a, 0x28, 0x40, 0x64, 0xe5, 0xae, 0xfd, 0x2d, 0xf6, 0xce, 0x47, 0x7a, 0x18, 0x79, 0x5f, 0x3c, + 0x6c, 0x0e, 0x3e, 0x7d, 0xf8, 0xf6, 0x3e, 0xe7, 0x7a, 0xdf, 0x24, 0x36, 0x55, 0x85, 0xd3, 0xbe, + 0x92, 0xaa, 0x72, 0xe7, 0x4f, 0x35, 0x72, 0x26, 0x9d, 0x32, 0x79, 0x9d, 0x2b, 0xa7, 0xdf, 0x96, + 0xe4, 0x7e, 0x5b, 0x8f, 0xb7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x72, 0xf7, 0xb4, 0xd8, 0x48, + 0x02, 0x00, 0x00, } diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.go b/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.go index 16a512027..bc7ea58a8 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.go @@ -5,6 +5,7 @@ package plugins import ( fmt "fmt" + core "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" proto "github.com/golang/protobuf/proto" math "math" ) @@ -29,12 +30,10 @@ type DistributedMPITrainingTask struct { // RunPolicy encapsulates various runtime policies of the distributed training // job, for example how to clean up resources and how long the job can stay // active. - RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. 
- SuccessPolicy SuccessPolicy `protobuf:"varint,4,opt,name=success_policy,json=successPolicy,proto3,enum=flyteidl.plugins.kubeflow.SuccessPolicy" json:"success_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DistributedMPITrainingTask) Reset() { *m = DistributedMPITrainingTask{} } @@ -83,21 +82,15 @@ func (m *DistributedMPITrainingTask) GetRunPolicy() *RunPolicy { return nil } -func (m *DistributedMPITrainingTask) GetSuccessPolicy() SuccessPolicy { - if m != nil { - return m.SuccessPolicy - } - return SuccessPolicy_SUCCESS_POLICY_DEFAULT -} - type DistributedMPITrainingReplicaSpec struct { - // Number of workers + // Number of replicas Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - PodTemplateName string `protobuf:"bytes,2,opt,name=pod_template_name,json=podTemplateName,proto3" json:"pod_template_name,omitempty"` - // Restart policy for the worker - RestartPolicy RestartPolicy `protobuf:"varint,3,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` + // Image used for the replica group + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // RestartPolicy Determines whether pods will be restarted when they exit. 
The allowed values are as follows: + RestartPolicy RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -135,13 +128,20 @@ func (m *DistributedMPITrainingReplicaSpec) GetReplicas() int32 { return 0 } -func (m *DistributedMPITrainingReplicaSpec) GetPodTemplateName() string { +func (m *DistributedMPITrainingReplicaSpec) GetImage() string { if m != nil { - return m.PodTemplateName + return m.Image } return "" } +func (m *DistributedMPITrainingReplicaSpec) GetResources() *core.Resources { + if m != nil { + return m.Resources + } + return nil +} + func (m *DistributedMPITrainingReplicaSpec) GetRestartPolicy() RestartPolicy { if m != nil { return m.RestartPolicy @@ -160,26 +160,26 @@ func init() { var fileDescriptor_298b02c608b0cddf = []byte{ // 345 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xcb, 0x4b, 0xc3, 0x40, - 0x10, 0xc6, 0x89, 0x55, 0xb1, 0x2b, 0xb6, 0x36, 0xa7, 0xda, 0x53, 0xad, 0x22, 0x45, 0x30, 0x81, - 0x7a, 0x10, 0xc1, 0x93, 0x7a, 0xf1, 0xa0, 0x96, 0xb4, 0x27, 0x2f, 0x61, 0xb3, 0x99, 0xa6, 0x4b, - 0xf7, 0xc5, 0x3e, 0x28, 0xbd, 0xfb, 0x67, 0xf9, 0xc7, 0x89, 0x79, 0x94, 0x28, 0xb6, 0x5e, 0xbc, - 0x65, 0x86, 0x6f, 0x7e, 0xdf, 0x64, 0xf6, 0x43, 0x67, 0x33, 0xb6, 0xb2, 0x40, 0x53, 0x16, 0x2a, - 0xe6, 0x32, 0x2a, 0x4c, 0xb8, 0x70, 0x09, 0xcc, 0x98, 0x5c, 0x86, 0x5c, 0xd1, 0x40, 0x69, 0x69, - 0xa5, 0x7f, 0x52, 0x89, 0x82, 0x52, 0x14, 0x54, 0xa2, 0xde, 0xc5, 0xe6, 0x79, 0x22, 0x39, 0x97, - 0xa2, 0x40, 0x0c, 0xde, 0x1b, 0xa8, 0xf7, 0x48, 0x8d, 0xd5, 0x34, 0x71, 0x16, 0xd2, 0xe7, 0xf1, - 0xd3, 0x54, 0x63, 0x2a, 0xa8, 0xc8, 0xa6, 0xd8, 0x2c, 0x7c, 0x40, 0xed, 0xa5, 0xd4, 0x0b, 0xd0, - 0xb1, 0x06, 0xc5, 0x28, 0xc1, 0xa6, 0xeb, 0xf5, 0xbd, 0xe1, 0xe1, 0xe8, 0x2e, 0xd8, 0xe8, 0x1d, - 0xfc, 0xce, 0x8b, 0x0a, 0xc0, 0x44, 0x01, 0x89, 0x5a, 0x05, 0xb4, 0x6c, 0x19, 0x9f, 0xa2, 0x0e, - 0xc3, 0x4e, 0x90, 0x79, 0xdd, 0x68, 0xe7, 0x1f, 0x8c, 0x8e, 0x2b, 0xec, 0xda, 0xea, 0x01, 0x21, - 0xed, 0x44, 0xac, 0x24, 0xa3, 0x64, 0xd5, 0x6d, 0xe4, 0x1e, 0xe7, 0x5b, 0x3c, 0x22, 0x27, 0xc6, - 0xb9, 0x36, 0x6a, 0xea, 0xea, 0xd3, 0x7f, 0x45, 0x2d, 0xe3, 0x08, 0x01, 0x63, 0x2a, 0xd0, 0x6e, - 0xdf, 0x1b, 0xb6, 0x46, 0xc3, 0x2d, 0xa0, 0x49, 0x31, 0x50, 0xc2, 0x8e, 0x4c, 0xbd, 0x1c, 0x7c, - 0x78, 0xe8, 0xf4, 0xcf, 0xbf, 0xf1, 0x7b, 0xe8, 0xe0, 0xdb, 0x33, 0xec, 0x45, 0xeb, 0xda, 0xbf, - 0x44, 0x1d, 0x25, 0xd3, 0xd8, 0x02, 0x57, 0x0c, 0x5b, 0x88, 0x05, 0xe6, 0x90, 0x9f, 0xb0, 0x19, - 0xb5, 0x95, 0x4c, 0xa7, 0x65, 0xff, 0x05, 0x73, 0xf8, 0x5a, 0x5f, 0x83, 0xb1, 0x58, 0xdb, 0xfa, - 0x1d, 0xb6, 0xaf, 0x1f, 0x15, 0x03, 0xd5, 0xfa, 0xba, 0x5e, 0xde, 0xdf, 0xbe, 0xdd, 0x64, 0xd4, - 0xce, 0x5d, 0x12, 0x10, 0xc9, 0xc3, 0x1c, 0x22, 0x75, 0x16, 0xae, 0x33, 0x98, 0x81, 0x08, 0x55, - 0x72, 0x95, 0xc9, 0xf0, 0x67, 0x2c, 0x93, 0xfd, 0x3c, 0x87, 0xd7, 0x9f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0x25, 0x16, 0x02, 0x0c, 0xf1, 0x02, 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x52, 0x4d, 0x4b, 0x03, 0x31, + 0x10, 0x65, 0xab, 0x15, 0x1b, 0xb1, 0xea, 0xe2, 0xa1, 0xee, 0xa9, 0x56, 0x91, 0x5e, 0xdc, 0x40, + 0x05, 0x45, 0xf0, 0xa4, 0x5e, 0x3c, 0x88, 0x25, 0xf6, 0xe4, 0xa5, 0xec, 0xa6, 0xe9, 0x36, 0x6c, + 0x36, 0x09, 0x93, 0x84, 0xd2, 0xdf, 0xe5, 0xff, 0xf1, 0xb7, 0x88, 0xfb, 0xd5, 0x2a, 0xb6, 0x5e, + 0xbc, 0x65, 0x32, 0x6f, 0xde, 
0x9b, 0x97, 0x3c, 0x74, 0x36, 0x15, 0x0b, 0xcb, 0xf8, 0x44, 0x60, + 0x2d, 0x5c, 0xc2, 0xa5, 0xc1, 0xa9, 0x8b, 0xd9, 0x54, 0xa8, 0x39, 0xce, 0x34, 0x0f, 0x35, 0x28, + 0xab, 0xfc, 0x93, 0x0a, 0x14, 0x96, 0xa0, 0xb0, 0x02, 0x05, 0x75, 0x0b, 0x53, 0x05, 0x0c, 0xdb, + 0xc8, 0xa4, 0xa6, 0x98, 0x0a, 0x2e, 0xd6, 0x53, 0x53, 0x95, 0x65, 0x4a, 0x16, 0xb8, 0xde, 0x7b, + 0x03, 0x05, 0x8f, 0xdc, 0x58, 0xe0, 0xb1, 0xb3, 0x6c, 0xf2, 0x3c, 0x7c, 0x1a, 0x41, 0xc4, 0x25, + 0x97, 0xc9, 0x28, 0x32, 0xa9, 0xcf, 0xd0, 0xc1, 0x5c, 0x41, 0xca, 0x60, 0x0c, 0x4c, 0x0b, 0x4e, + 0x23, 0xd3, 0xf1, 0xba, 0x5e, 0x7f, 0x6f, 0x70, 0x17, 0xae, 0x5d, 0x2b, 0xfc, 0x9d, 0x8f, 0x14, + 0x04, 0xaf, 0x9a, 0x51, 0xd2, 0x2e, 0x48, 0xcb, 0x2b, 0xe3, 0x73, 0x74, 0x24, 0x22, 0x27, 0xe9, + 0x6c, 0x55, 0xa8, 0xf1, 0x0f, 0x42, 0x87, 0x15, 0x6d, 0x2d, 0xf5, 0x80, 0x10, 0x38, 0x39, 0xd6, + 0x4a, 0x70, 0xba, 0xe8, 0x6c, 0xe5, 0x1a, 0xe7, 0x1b, 0x34, 0x88, 0x93, 0xc3, 0x1c, 0x4b, 0x5a, + 0x50, 0x1d, 0x7b, 0x1f, 0x1e, 0x3a, 0xfd, 0x53, 0xdc, 0x0f, 0xd0, 0xee, 0xb7, 0x57, 0x6b, 0x92, + 0xba, 0xf6, 0x8f, 0x51, 0x93, 0x67, 0x51, 0xc2, 0x72, 0x97, 0x2d, 0x52, 0x14, 0xfe, 0x35, 0x6a, + 0x01, 0x33, 0xca, 0x01, 0x65, 0xa6, 0xdc, 0xad, 0xb3, 0xdc, 0xed, 0xeb, 0x93, 0x43, 0x52, 0xf5, + 0xc9, 0x12, 0xea, 0xbf, 0xa0, 0x36, 0x30, 0x63, 0x23, 0xb0, 0x95, 0xb1, 0xed, 0xae, 0xd7, 0x6f, + 0x0f, 0xfa, 0x9b, 0x8c, 0x15, 0x03, 0xa5, 0xb9, 0x7d, 0x58, 0x2d, 0xef, 0x6f, 0xdf, 0x6e, 0x12, + 0x6e, 0x67, 0x2e, 0x0e, 0xa9, 0xca, 0x70, 0x4e, 0xa2, 0x20, 0xc1, 0x75, 0xa8, 0x12, 0x26, 0xb1, + 0x8e, 0x2f, 0x13, 0x85, 0x7f, 0xe6, 0x2c, 0xde, 0xc9, 0x83, 0x75, 0xf5, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd7, 0xae, 0x61, 0xdf, 0xdd, 0x02, 0x00, 0x00, } diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.validate.go b/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.validate.go index 221de272e..feb0bab3d 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.validate.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/mpi.pb.validate.go @@ -74,8 +74,6 @@ func (m *DistributedMPITrainingTask) Validate() error { } } - // no validation rules for SuccessPolicy - return nil } @@ -145,7 +143,17 @@ func (m *DistributedMPITrainingReplicaSpec) Validate() error { // no validation rules for Replicas - // no validation rules for PodTemplateName + // no validation rules for Image + + if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } // no validation rules for RestartPolicy diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.go b/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.go index fb5d4edd5..31b8e3ff4 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.go @@ -5,6 +5,7 @@ package plugins import ( fmt "fmt" + core "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" proto "github.com/golang/protobuf/proto" math "math" ) @@ -29,12 +30,10 @@ type DistributedPyTorchTrainingTask struct { // RunPolicy encapsulates various runtime policies of the distributed training // job, for example how to clean up resources and how long the job can stay // active. - RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. 
- SuccessPolicy SuccessPolicy `protobuf:"varint,4,opt,name=success_policy,json=successPolicy,proto3,enum=flyteidl.plugins.kubeflow.SuccessPolicy" json:"success_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DistributedPyTorchTrainingTask) Reset() { *m = DistributedPyTorchTrainingTask{} } @@ -83,21 +82,15 @@ func (m *DistributedPyTorchTrainingTask) GetRunPolicy() *RunPolicy { return nil } -func (m *DistributedPyTorchTrainingTask) GetSuccessPolicy() SuccessPolicy { - if m != nil { - return m.SuccessPolicy - } - return SuccessPolicy_SUCCESS_POLICY_DEFAULT -} - type DistributedPyTorchTrainingReplicaSpec struct { - // Number of workers + // Number of replicas Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - PodTemplateName string `protobuf:"bytes,2,opt,name=pod_template_name,json=podTemplateName,proto3" json:"pod_template_name,omitempty"` - // Restart policy for the worker - RestartPolicy RestartPolicy `protobuf:"varint,3,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` + // Image used for the replica group + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // RestartPolicy Determines whether pods will be restarted when they exit. 
The allowed values are as follows: + RestartPolicy RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -135,13 +128,20 @@ func (m *DistributedPyTorchTrainingReplicaSpec) GetReplicas() int32 { return 0 } -func (m *DistributedPyTorchTrainingReplicaSpec) GetPodTemplateName() string { +func (m *DistributedPyTorchTrainingReplicaSpec) GetImage() string { if m != nil { - return m.PodTemplateName + return m.Image } return "" } +func (m *DistributedPyTorchTrainingReplicaSpec) GetResources() *core.Resources { + if m != nil { + return m.Resources + } + return nil +} + func (m *DistributedPyTorchTrainingReplicaSpec) GetRestartPolicy() RestartPolicy { if m != nil { return m.RestartPolicy @@ -159,27 +159,27 @@ func init() { } var fileDescriptor_37e97bee6e09d707 = []byte{ - // 346 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xcb, 0x4a, 0xfb, 0x40, - 0x14, 0xc6, 0xc9, 0xbf, 0x7f, 0xc5, 0x8e, 0xd8, 0x62, 0x56, 0xb5, 0x0b, 0x29, 0xc5, 0x4b, 0x11, - 0xcc, 0x40, 0x5d, 0x88, 0x3b, 0x51, 0xd7, 0x5a, 0xa6, 0x59, 0xb9, 0x09, 0x93, 0xc9, 0x34, 0x1d, - 0x3a, 0x37, 0xe6, 0x42, 0xc9, 0x3b, 0xf8, 0x50, 0x3e, 0x9a, 0x34, 0x97, 0x12, 0x85, 0x16, 0x17, - 0xee, 0xe6, 0x1c, 0xbe, 0xf3, 0xfb, 0xf8, 0xce, 0x1c, 0x70, 0xbd, 0xe0, 0x85, 0xa3, 0x2c, 0xe3, - 0x50, 0x73, 0x9f, 0x33, 0x69, 0xe1, 0xca, 0xa7, 0x74, 0xc1, 0xd5, 0x1a, 0xea, 0xc2, 0x29, 0x43, - 0x96, 0x91, 0x36, 0xca, 0xa9, 0xf0, 0xac, 0x11, 0x46, 0xb5, 0x30, 0x6a, 0x84, 0xc3, 0xab, 0xdd, - 0x0c, 0xa2, 0x84, 0x50, 0xb2, 0x42, 0x8c, 0x3f, 0x3a, 0xe0, 0xfc, 0x85, 0x59, 0x67, 0x58, 0xea, - 0x1d, 0xcd, 0x66, 0x45, 0xbc, 0xe1, 0xc7, 0x06, 0x33, 0xc9, 0x64, 0x1e, 0x63, 0xbb, 0x0a, 0x19, - 0xe8, 0xaf, 0x95, 0x59, 0x51, 0x93, 0x18, 0xaa, 0x39, 0x23, 0xd8, 0x0e, 0x82, 0x51, 0x30, 0x39, - 0x9e, 0x3e, 0x46, 0x3b, 0xfd, 0xa3, 0xdd, 0x4c, 0x54, 0x41, 0xe6, 0x9a, 0x12, 0xd4, 0xab, 0xc0, - 0x75, 0xcb, 0x6e, 0xac, 0x04, 0xb6, 0xae, 0x6d, 0xf5, 0xef, 0xaf, 0xac, 0x2a, 0xf0, 0xd6, 0xea, - 0x19, 0x00, 0xe3, 0x65, 0xa2, 0x15, 0x67, 0xa4, 0x18, 0x74, 0x4a, 0x97, 0x8b, 0x3d, 0x2e, 0xc8, - 0xcb, 0x59, 0xa9, 0x45, 0x5d, 0xd3, 0x3c, 0xc3, 0x37, 0xd0, 0xb3, 0x9e, 0x10, 0x6a, 0x6d, 0x03, - 0xfa, 0x3f, 0x0a, 0x26, 0xbd, 0xe9, 0x64, 0x0f, 0x68, 0x5e, 0x0d, 0xd4, 0xb0, 0x13, 0xdb, 0x2e, - 0xc7, 0x9f, 0x01, 0xb8, 0xfc, 0x55, 0x9e, 0x70, 0x08, 0x8e, 0xbe, 0x7d, 0xc7, 0x01, 0xda, 0xd6, - 0xe1, 0x0d, 0x38, 0xd5, 0x2a, 0x4b, 0x1c, 0x15, 0x9a, 0x63, 0x47, 0x13, 0x89, 0x05, 0x2d, 0x17, - 0xd9, 0x45, 0x7d, 0xad, 0xb2, 0xb8, 0xee, 0xbf, 0x62, 0x41, 0x37, 0x11, 0x0c, 0xb5, 0x0e, 0x1b, - 0xd7, 0xde, 0xc5, 0xfe, 0x08, 0xa8, 0x1a, 0x68, 0x22, 0x98, 0x76, 0xf9, 0xf4, 0xf0, 0x7e, 0x9f, - 0x33, 0xb7, 0xf4, 0x69, 0x44, 0x94, 0x80, 0x25, 0x44, 0x99, 0x1c, 0x6e, 0xef, 0x31, 0xa7, 0x12, - 0xea, 0xf4, 0x36, 0x57, 0xf0, 0xe7, 0x89, 0xa6, 0x87, 0xe5, 0x4d, 0xde, 0x7d, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x25, 0xd3, 0x11, 0xab, 0x01, 0x03, 0x00, 0x00, + // 348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0x4f, 0x4b, 0xf3, 0x40, + 0x10, 0xc6, 0x49, 0xdf, 0xb7, 0x62, 0x57, 0xac, 0x10, 0x3c, 0xd4, 0x1e, 0xa4, 0x14, 0xff, 0xf4, + 0x62, 0x16, 0x2a, 0x28, 0xde, 0x44, 0xbd, 0x5b, 0xd6, 0x9e, 0xbc, 0x94, 0x64, 0xbb, 0xdd, 0x2e, + 0x49, 0x76, 0x97, 0xd9, 0x5d, 0x4a, 0x3e, 0x9b, 0xdf, 
0xc6, 0x4f, 0x22, 0xcd, 0x26, 0x69, 0x15, + 0x5a, 0x3c, 0x78, 0xcb, 0x64, 0x9e, 0x79, 0x7e, 0xf3, 0x24, 0x83, 0xae, 0x17, 0x59, 0x61, 0x99, + 0x98, 0x67, 0x58, 0x67, 0x8e, 0x0b, 0x69, 0x70, 0xea, 0x12, 0xb6, 0xc8, 0xd4, 0x0a, 0xeb, 0xc2, + 0x2a, 0xa0, 0xcb, 0x48, 0x83, 0xb2, 0x2a, 0x3c, 0xab, 0x85, 0x51, 0x25, 0x8c, 0x6a, 0x61, 0xbf, + 0x69, 0x61, 0xaa, 0x80, 0x61, 0x1b, 0x9b, 0xd4, 0xf8, 0xa9, 0xfe, 0xd5, 0x6e, 0x7b, 0xaa, 0xf2, + 0x5c, 0x49, 0xaf, 0x1b, 0x7e, 0xb4, 0xd0, 0xf9, 0x8b, 0x30, 0x16, 0x44, 0xe2, 0x2c, 0x9b, 0x4f, + 0x8a, 0xe9, 0x1a, 0x3d, 0x85, 0x58, 0x48, 0x21, 0xf9, 0x34, 0x36, 0x69, 0x28, 0xd0, 0xc9, 0x4a, + 0x41, 0xca, 0x60, 0x06, 0x4c, 0x67, 0x82, 0xc6, 0xa6, 0x17, 0x0c, 0x82, 0xd1, 0xd1, 0xf8, 0x31, + 0xda, 0xb9, 0x5a, 0xb4, 0xdb, 0x93, 0x78, 0x93, 0x37, 0xcd, 0x28, 0xe9, 0x7a, 0xe3, 0xea, 0x95, + 0x59, 0xa3, 0xf2, 0xd8, 0xd8, 0x6d, 0x54, 0xeb, 0xaf, 0x50, 0xde, 0xb8, 0x41, 0x3d, 0x23, 0x04, + 0x4e, 0xce, 0xb4, 0xca, 0x04, 0x2d, 0x7a, 0xff, 0x4a, 0xca, 0xc5, 0x1e, 0x0a, 0x71, 0x72, 0x52, + 0x6a, 0x49, 0x07, 0xea, 0xc7, 0xe1, 0x67, 0x80, 0x2e, 0x7f, 0x85, 0x0f, 0xfb, 0xe8, 0xf0, 0xdb, + 0xd7, 0x6b, 0x93, 0xa6, 0x0e, 0x4f, 0x51, 0x5b, 0xe4, 0x31, 0x67, 0x65, 0xd6, 0x0e, 0xf1, 0x45, + 0x78, 0x87, 0x3a, 0xc0, 0x8c, 0x72, 0x40, 0x99, 0xa9, 0xf6, 0xeb, 0x6d, 0xf6, 0x5b, 0xff, 0xf0, + 0x88, 0xd4, 0x7d, 0xb2, 0x91, 0x86, 0xaf, 0xa8, 0x0b, 0xcc, 0xd8, 0x18, 0x6c, 0x1d, 0xee, 0xff, + 0x20, 0x18, 0x75, 0xc7, 0xa3, 0x7d, 0xe1, 0xfc, 0x40, 0x15, 0xf0, 0x18, 0xb6, 0xcb, 0xa7, 0x87, + 0xf7, 0x7b, 0x2e, 0xec, 0xd2, 0x25, 0x11, 0x55, 0x39, 0x2e, 0x4d, 0x14, 0x70, 0xdc, 0x1c, 0x18, + 0x67, 0x12, 0xeb, 0xe4, 0x86, 0x2b, 0xfc, 0xf3, 0xe6, 0x92, 0x83, 0xf2, 0xc8, 0x6e, 0xbf, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xb0, 0xf9, 0xaa, 0xc3, 0xed, 0x02, 0x00, 0x00, } diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.validate.go b/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.validate.go index 7724050ad..31287bccc 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.validate.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/pytorch.pb.validate.go @@ -74,8 +74,6 @@ func (m *DistributedPyTorchTrainingTask) Validate() error { } } - // no validation rules for SuccessPolicy - return nil } @@ -146,7 +144,17 @@ func (m *DistributedPyTorchTrainingReplicaSpec) Validate() error { // no validation rules for Replicas - // no validation rules for PodTemplateName + // no validation rules for Image + + if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } // no validation rules for RestartPolicy diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.go b/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.go index 519606cab..dfd9bdd56 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.go @@ -5,6 +5,7 @@ package plugins import ( fmt "fmt" + core "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" proto "github.com/golang/protobuf/proto" math "math" ) @@ -31,12 +32,10 @@ type DistributedTensorflowTrainingTask struct { // RunPolicy encapsulates various runtime policies of the distributed training // job, for example how to clean up resources and how long the job can stay // active. 
- RunPolicy *RunPolicy `protobuf:"bytes,4,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. - SuccessPolicy SuccessPolicy `protobuf:"varint,5,opt,name=success_policy,json=successPolicy,proto3,enum=flyteidl.plugins.kubeflow.SuccessPolicy" json:"success_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RunPolicy *RunPolicy `protobuf:"bytes,4,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DistributedTensorflowTrainingTask) Reset() { *m = DistributedTensorflowTrainingTask{} } @@ -92,21 +91,15 @@ func (m *DistributedTensorflowTrainingTask) GetRunPolicy() *RunPolicy { return nil } -func (m *DistributedTensorflowTrainingTask) GetSuccessPolicy() SuccessPolicy { - if m != nil { - return m.SuccessPolicy - } - return SuccessPolicy_SUCCESS_POLICY_DEFAULT -} - type DistributedTensorflowTrainingReplicaSpec struct { - // Number of workers + // Number of replicas Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - PodTemplateName string `protobuf:"bytes,2,opt,name=pod_template_name,json=podTemplateName,proto3" json:"pod_template_name,omitempty"` - // Restart policy for the worker - RestartPolicy RestartPolicy `protobuf:"varint,3,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` + // Image used for the replica group + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // RestartPolicy Determines whether pods will be restarted when they exit. 
The allowed values are as follows: + RestartPolicy RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl.plugins.kubeflow.RestartPolicy" json:"restart_policy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -146,13 +139,20 @@ func (m *DistributedTensorflowTrainingReplicaSpec) GetReplicas() int32 { return 0 } -func (m *DistributedTensorflowTrainingReplicaSpec) GetPodTemplateName() string { +func (m *DistributedTensorflowTrainingReplicaSpec) GetImage() string { if m != nil { - return m.PodTemplateName + return m.Image } return "" } +func (m *DistributedTensorflowTrainingReplicaSpec) GetResources() *core.Resources { + if m != nil { + return m.Resources + } + return nil +} + func (m *DistributedTensorflowTrainingReplicaSpec) GetRestartPolicy() RestartPolicy { if m != nil { return m.RestartPolicy @@ -170,28 +170,28 @@ func init() { } var fileDescriptor_93de2bd764ddf01a = []byte{ - // 360 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0x4d, 0x4f, 0xfa, 0x30, - 0x1c, 0xc7, 0xb3, 0x3f, 0x7f, 0x8c, 0x94, 0x30, 0xe2, 0x4e, 0xc8, 0x09, 0x89, 0x31, 0x0b, 0x89, - 0x5b, 0x82, 0x07, 0xe3, 0x55, 0x3c, 0xab, 0x19, 0x3b, 0x79, 0x59, 0xb6, 0xee, 0xc7, 0xa8, 0x74, - 0x6d, 0xd3, 0x87, 0x10, 0x5e, 0x9f, 0xaf, 0xca, 0x9b, 0x61, 0x4f, 0xa0, 0x09, 0xe8, 0x81, 0x5b, - 0x7f, 0xcd, 0xb7, 0x9f, 0xcf, 0xbe, 0xe9, 0x8a, 0x26, 0x0b, 0xba, 0xd1, 0x40, 0x52, 0xea, 0x0b, - 0x6a, 0x32, 0xc2, 0x94, 0xbf, 0x32, 0x09, 0x2c, 0x28, 0x5f, 0xfb, 0x1a, 0x98, 0xe2, 0x72, 0xbb, - 0xf4, 0x84, 0xe4, 0x9a, 0x3b, 0x97, 0x75, 0xd6, 0xab, 0xb2, 0x5e, 0x9d, 0x1d, 0xde, 0x1c, 0xc6, - 0x60, 0x9e, 0xe7, 0x9c, 0x95, 0x88, 0xf1, 0x67, 0x0b, 0x5d, 0x3d, 0x11, 0xa5, 0x25, 0x49, 0x8c, - 0x86, 0x34, 0x6c, 0x14, 0xa1, 0x8c, 0x09, 0x23, 0x2c, 0x0b, 0x63, 0xb5, 0x72, 0x28, 0xea, 0xaf, - 0xb9, 0x5c, 0x81, 0x8c, 0x24, 0x08, 0x4a, 0x70, 0xac, 0x06, 0xd6, 0xc8, 0x72, 0xbb, 0xd3, 0x99, - 0x77, 0xf0, 0x13, 0xbc, 0xa3, 0xd8, 0xa0, 0xe4, 0xcc, 0x05, 0xe0, 0xc0, 0x2e, 0xd9, 0xd5, 0x96, - 0x72, 0x52, 0xd4, 0x15, 0x6a, 0x67, 0xfa, 0x77, 0x3a, 0x13, 0x12, 0xaa, 0xb1, 0xbc, 0x23, 0x1b, - 0x2f, 0x09, 0x2c, 0x76, 0xa2, 0xd6, 0xe9, 0x44, 0xbd, 0x02, 0xdd, 0xb8, 0x66, 0x08, 0x49, 0xc3, - 0x22, 0xc1, 0x29, 0xc1, 0x9b, 0xc1, 0xff, 0xc2, 0x73, 0x7d, 0xc4, 0x13, 0x18, 0xf6, 0x5a, 0x64, - 0x83, 0x8e, 0xac, 0x97, 0xce, 0x0b, 0xb2, 0x95, 0xc1, 0x18, 0x94, 0xaa, 0x41, 0xed, 0x91, 0xe5, - 0xda, 0x53, 0xf7, 0x08, 0x68, 0x5e, 0x1e, 0xa8, 0x60, 0x3d, 0xb5, 0x3f, 0x8e, 0x3f, 0x2c, 0xe4, - 0xfe, 0xb5, 0x91, 0x33, 0x44, 0xe7, 0xdf, 0xee, 0xbe, 0x1d, 0x34, 0xb3, 0x33, 0x41, 0x17, 0x82, - 0xa7, 0x91, 0x86, 0x5c, 0xd0, 0x58, 0x43, 0xc4, 0xe2, 0x1c, 0x8a, 0x6b, 0xeb, 0x04, 0x7d, 0xc1, - 0xd3, 0xb0, 0xda, 0x7f, 0x8e, 0x73, 0xd8, 0xb6, 0x90, 0xa0, 0x74, 0x2c, 0x75, 0xdd, 0xa2, 0xf5, - 0x6b, 0x8b, 0xa0, 0x3c, 0x50, 0xb7, 0x90, 0xfb, 0xe3, 0xe3, 0xc3, 0xdb, 0x7d, 0x46, 0xf4, 0xd2, - 0x24, 0x1e, 0xe6, 0xb9, 0x5f, 0x40, 0xb8, 0xcc, 0xfc, 0xe6, 0xff, 0xcf, 0x80, 0xf9, 0x22, 0xb9, - 0xcd, 0xb8, 0xff, 0xf3, 0x49, 0x24, 0x67, 0xc5, 0x1b, 0xb8, 0xfb, 0x0a, 0x00, 0x00, 0xff, 0xff, - 0xbf, 0xb4, 0x0f, 0x89, 0x74, 0x03, 0x00, 0x00, + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0x41, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0x99, 0x73, 0xe2, 0x32, 0x56, 0xa1, 0x78, 0x98, 0x3b, 0xe9, 0x10, 0x19, 0x82, 0x0d, + 0x4c, 0x50, 0xbc, 0x3a, 0xef, 0x4a, 0xdc, 0xc9, 
0xcb, 0x68, 0xb3, 0xb7, 0x2e, 0xb6, 0x4d, 0xc2, + 0x4b, 0xc2, 0xd8, 0x37, 0xf2, 0x8b, 0xf9, 0x3d, 0x64, 0xed, 0xda, 0x4e, 0x61, 0xc3, 0xc3, 0x6e, + 0x79, 0xcd, 0x3f, 0xff, 0xdf, 0x7b, 0xaf, 0x7f, 0x72, 0x3b, 0x4f, 0x57, 0x16, 0xc4, 0x2c, 0xa5, + 0x3a, 0x75, 0xb1, 0x90, 0x86, 0x26, 0x2e, 0x82, 0x79, 0xaa, 0x96, 0xd4, 0x82, 0x34, 0x0a, 0xd7, + 0xc7, 0x40, 0xa3, 0xb2, 0xca, 0xbf, 0x28, 0xb5, 0xc1, 0x46, 0x1b, 0x94, 0xda, 0x7e, 0x75, 0x45, + 0xb9, 0x42, 0xa0, 0x36, 0x34, 0x89, 0x29, 0x5e, 0xf5, 0x6f, 0x76, 0x13, 0xb8, 0xca, 0x32, 0x25, + 0x0b, 0xdd, 0xe0, 0xab, 0x49, 0xae, 0x5e, 0x84, 0xb1, 0x28, 0x22, 0x67, 0x61, 0x36, 0xa9, 0xe8, + 0x13, 0x0c, 0x85, 0x14, 0x32, 0x9e, 0x84, 0x26, 0xf1, 0x53, 0x72, 0xb6, 0x54, 0x98, 0x00, 0x4e, + 0x11, 0x74, 0x2a, 0x78, 0x68, 0x7a, 0x8d, 0xcb, 0xc6, 0xb0, 0x33, 0x1a, 0x07, 0x3b, 0xbb, 0x0b, + 0xf6, 0xda, 0xb2, 0xc2, 0xe7, 0x5d, 0x03, 0x67, 0x5e, 0xe1, 0xbd, 0xf9, 0x64, 0xfc, 0x19, 0xe9, + 0x68, 0x53, 0x93, 0x8e, 0x0e, 0x47, 0x22, 0xda, 0x54, 0x94, 0x4f, 0xe2, 0xf1, 0x85, 0x80, 0x79, + 0x0d, 0x6a, 0x1e, 0x0e, 0xd4, 0xcd, 0xad, 0x2b, 0xd6, 0x98, 0x10, 0x74, 0x72, 0xaa, 0x55, 0x2a, + 0xf8, 0xaa, 0x77, 0x9c, 0x73, 0xae, 0xf7, 0x70, 0x98, 0x93, 0x6f, 0xb9, 0x96, 0xb5, 0xb1, 0x3c, + 0x0e, 0xbe, 0x1b, 0x64, 0xf8, 0xdf, 0x06, 0xfc, 0x3e, 0x39, 0xfd, 0xf5, 0xab, 0x5a, 0xac, 0xaa, + 0xfd, 0x73, 0xd2, 0x12, 0x59, 0x18, 0x43, 0xbe, 0xd9, 0x36, 0x2b, 0x0a, 0xff, 0x81, 0xb4, 0x11, + 0x8c, 0x72, 0xc8, 0xa1, 0x5c, 0x45, 0xaf, 0x6e, 0x71, 0x1d, 0xb0, 0x80, 0x95, 0xf7, 0xac, 0x96, + 0xfa, 0xaf, 0xc4, 0x43, 0x30, 0x36, 0x44, 0xbb, 0x3d, 0x9f, 0x37, 0x1a, 0xee, 0x9b, 0xaf, 0x78, + 0xb0, 0x99, 0xb1, 0x8b, 0xdb, 0xe5, 0xf3, 0xd3, 0xc7, 0x63, 0x2c, 0xec, 0xc2, 0x45, 0x01, 0x57, + 0x19, 0xcd, 0x4d, 0x14, 0xc6, 0xb4, 0x0a, 0x74, 0x0c, 0x92, 0xea, 0xe8, 0x2e, 0x56, 0xf4, 0x6f, + 0xc6, 0xa3, 0x93, 0x3c, 0xd4, 0xf7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x01, 0x7d, 0x16, + 0x60, 0x03, 0x00, 0x00, } diff --git a/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.validate.go b/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.validate.go index 627f9c0e9..098b4dc7c 100644 --- a/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.validate.go +++ b/gen/pb-go/flyteidl/plugins/kubeflow/tensorflow.pb.validate.go @@ -84,8 +84,6 @@ func (m *DistributedTensorflowTrainingTask) Validate() error { } } - // no validation rules for SuccessPolicy - return nil } @@ -156,7 +154,17 @@ func (m *DistributedTensorflowTrainingReplicaSpec) Validate() error { // no validation rules for Replicas - // no validation rules for PodTemplateName + // no validation rules for Image + + if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } // no validation rules for RestartPolicy diff --git a/gen/pb-go/flyteidl/plugins/mpi.pb.go b/gen/pb-go/flyteidl/plugins/mpi.pb.go new file mode 100644 index 000000000..f09295729 --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/mpi.pb.go @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: flyteidl/plugins/mpi.proto + +package plugins + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +type DistributedMPITrainingTask struct { + // number of worker spawned in the cluster for this job + NumWorkers int32 `protobuf:"varint,1,opt,name=num_workers,json=numWorkers,proto3" json:"num_workers,omitempty"` + // number of launcher replicas spawned in the cluster for this job + // The launcher pod invokes mpirun and communicates with worker pods through MPI. + NumLauncherReplicas int32 `protobuf:"varint,2,opt,name=num_launcher_replicas,json=numLauncherReplicas,proto3" json:"num_launcher_replicas,omitempty"` + // number of slots per worker used in hostfile. + // The available slots (GPUs) in each pod. + Slots int32 `protobuf:"varint,3,opt,name=slots,proto3" json:"slots,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributedMPITrainingTask) Reset() { *m = DistributedMPITrainingTask{} } +func (m *DistributedMPITrainingTask) String() string { return proto.CompactTextString(m) } +func (*DistributedMPITrainingTask) ProtoMessage() {} +func (*DistributedMPITrainingTask) Descriptor() ([]byte, []int) { + return fileDescriptor_13cf3fae00e5b069, []int{0} +} + +func (m *DistributedMPITrainingTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributedMPITrainingTask.Unmarshal(m, b) +} +func (m *DistributedMPITrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributedMPITrainingTask.Marshal(b, m, deterministic) +} +func (m *DistributedMPITrainingTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributedMPITrainingTask.Merge(m, src) +} +func (m *DistributedMPITrainingTask) XXX_Size() int { + return xxx_messageInfo_DistributedMPITrainingTask.Size(m) +} +func (m *DistributedMPITrainingTask) XXX_DiscardUnknown() { + xxx_messageInfo_DistributedMPITrainingTask.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributedMPITrainingTask proto.InternalMessageInfo + +func (m *DistributedMPITrainingTask) GetNumWorkers() int32 { + if m != nil { + return m.NumWorkers + } + return 0 +} + +func (m *DistributedMPITrainingTask) GetNumLauncherReplicas() int32 { + if m != nil { + return m.NumLauncherReplicas + } + return 0 +} + +func (m *DistributedMPITrainingTask) GetSlots() int32 { + if m != nil { + return m.Slots + } + return 0 +} + +func init() { + proto.RegisterType((*DistributedMPITrainingTask)(nil), "flyteidl.plugins.DistributedMPITrainingTask") +} + +func init() { proto.RegisterFile("flyteidl/plugins/mpi.proto", fileDescriptor_13cf3fae00e5b069) } + +var fileDescriptor_13cf3fae00e5b069 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0x31, 0x4b, 0xc7, 0x30, + 0x10, 0x47, 0xa9, 0xf2, 0x77, 0x88, 0x8b, 0x44, 0x85, 0xd2, 0x45, 0x71, 0x72, 0xb1, 0x01, 0x1d, + 0xc4, 0x55, 0x5c, 0x04, 0x05, 0x29, 0x05, 0xc1, 0xa5, 0x24, 0x6d, 0x4c, 0x8f, 0x26, 0x97, 0x70, + 0x49, 0x10, 0x3f, 
0x81, 0x5f, 0x5b, 0x4c, 0xab, 0x83, 0xe3, 0xdd, 0x7b, 0xc3, 0xef, 0xb1, 0xe6, + 0xdd, 0x7e, 0x26, 0x0d, 0x93, 0x15, 0xc1, 0x66, 0x03, 0x18, 0x85, 0x0b, 0xd0, 0x06, 0xf2, 0xc9, + 0xf3, 0xa3, 0x5f, 0xd6, 0x6e, 0xec, 0xe2, 0xab, 0x62, 0xcd, 0x03, 0xc4, 0x44, 0xa0, 0x72, 0xd2, + 0xd3, 0xf3, 0xcb, 0x63, 0x4f, 0x12, 0x10, 0xd0, 0xf4, 0x32, 0x2e, 0xfc, 0x8c, 0x1d, 0x62, 0x76, + 0xc3, 0x87, 0xa7, 0x45, 0x53, 0xac, 0xab, 0xf3, 0xea, 0x72, 0xd7, 0x31, 0xcc, 0xee, 0x75, 0xfd, + 0xf0, 0x6b, 0x76, 0xfa, 0x23, 0x58, 0x99, 0x71, 0x9c, 0x35, 0x0d, 0xa4, 0x83, 0x85, 0x51, 0xc6, + 0x7a, 0xaf, 0xa8, 0xc7, 0x98, 0xdd, 0xd3, 0xc6, 0xba, 0x0d, 0xf1, 0x13, 0xb6, 0x8b, 0xd6, 0xa7, + 0x58, 0xef, 0x17, 0x67, 0x3d, 0xee, 0xef, 0xde, 0x6e, 0x0d, 0xa4, 0x39, 0xab, 0x76, 0xf4, 0x4e, + 0x94, 0xa1, 0x9e, 0x8c, 0xf8, 0xab, 0x31, 0x1a, 0x45, 0x50, 0x57, 0xc6, 0x8b, 0xff, 0x81, 0xea, + 0xa0, 0xd4, 0xdd, 0x7c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x76, 0x5d, 0x84, 0xbc, 0xfb, 0x00, 0x00, + 0x00, +} diff --git a/gen/pb-go/flyteidl/plugins/mpi.pb.validate.go b/gen/pb-go/flyteidl/plugins/mpi.pb.validate.go new file mode 100644 index 000000000..c0d746eee --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/mpi.pb.validate.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl/plugins/mpi.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/ptypes" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = ptypes.DynamicAny{} +) + +// define the regex for a UUID once up-front +var _mpi_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") + +// Validate checks the field values on DistributedMPITrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, an error is returned. +func (m *DistributedMPITrainingTask) Validate() error { + if m == nil { + return nil + } + + // no validation rules for NumWorkers + + // no validation rules for NumLauncherReplicas + + // no validation rules for Slots + + return nil +} + +// DistributedMPITrainingTaskValidationError is the validation error returned +// by DistributedMPITrainingTask.Validate if the designated constraints aren't met. +type DistributedMPITrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedMPITrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedMPITrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedMPITrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedMPITrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedMPITrainingTaskValidationError) ErrorName() string { + return "DistributedMPITrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedMPITrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedMPITrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedMPITrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedMPITrainingTaskValidationError{} diff --git a/gen/pb-go/flyteidl/plugins/pytorch.pb.go b/gen/pb-go/flyteidl/plugins/pytorch.pb.go new file mode 100644 index 000000000..79138e568 --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/pytorch.pb.go @@ -0,0 +1,82 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: flyteidl/plugins/pytorch.proto + +package plugins + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator +type DistributedPyTorchTrainingTask struct { + // number of worker replicas spawned in the cluster for this job + Workers int32 `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributedPyTorchTrainingTask) Reset() { *m = DistributedPyTorchTrainingTask{} } +func (m *DistributedPyTorchTrainingTask) String() string { return proto.CompactTextString(m) } +func (*DistributedPyTorchTrainingTask) ProtoMessage() {} +func (*DistributedPyTorchTrainingTask) Descriptor() ([]byte, []int) { + return fileDescriptor_4df8a9374b28b766, []int{0} +} + +func (m *DistributedPyTorchTrainingTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributedPyTorchTrainingTask.Unmarshal(m, b) +} +func (m *DistributedPyTorchTrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributedPyTorchTrainingTask.Marshal(b, m, deterministic) +} +func (m *DistributedPyTorchTrainingTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributedPyTorchTrainingTask.Merge(m, src) +} +func (m *DistributedPyTorchTrainingTask) XXX_Size() int { + return xxx_messageInfo_DistributedPyTorchTrainingTask.Size(m) +} +func (m *DistributedPyTorchTrainingTask) XXX_DiscardUnknown() { + xxx_messageInfo_DistributedPyTorchTrainingTask.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributedPyTorchTrainingTask proto.InternalMessageInfo + +func (m *DistributedPyTorchTrainingTask) GetWorkers() int32 { + if m != nil { + return m.Workers + } + return 0 +} + +func init() { + proto.RegisterType((*DistributedPyTorchTrainingTask)(nil), "flyteidl.plugins.DistributedPyTorchTrainingTask") +} + +func init() { proto.RegisterFile("flyteidl/plugins/pytorch.proto", 
fileDescriptor_4df8a9374b28b766) } + +var fileDescriptor_4df8a9374b28b766 = []byte{ + // 156 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcb, 0xa9, 0x2c, + 0x49, 0xcd, 0x4c, 0xc9, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0xa8, 0x2c, + 0xc9, 0x2f, 0x4a, 0xce, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x80, 0xc9, 0xeb, 0x41, + 0xe5, 0x95, 0xac, 0xb8, 0xe4, 0x5c, 0x32, 0x8b, 0x4b, 0x8a, 0x32, 0x93, 0x4a, 0x4b, 0x52, 0x53, + 0x02, 0x2a, 0x43, 0x40, 0xaa, 0x43, 0x8a, 0x12, 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x43, 0x12, 0x8b, + 0xb3, 0x85, 0x24, 0xb8, 0xd8, 0xcb, 0xf3, 0x8b, 0xb2, 0x53, 0x8b, 0x8a, 0x25, 0x18, 0x15, 0x18, + 0x35, 0x58, 0x83, 0x60, 0x5c, 0x27, 0xcb, 0x28, 0xf3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, + 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xd1, 0xf9, 0x45, 0xe9, 0xfa, 0x70, 0x37, 0xa4, 0xa7, 0xe6, 0xe9, + 0x17, 0x24, 0xe9, 0xa6, 0xe7, 0xeb, 0xa3, 0x3b, 0x2b, 0x89, 0x0d, 0xec, 0x1e, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x91, 0x53, 0x3a, 0xa1, 0xb1, 0x00, 0x00, 0x00, +} diff --git a/gen/pb-go/flyteidl/plugins/pytorch.pb.validate.go b/gen/pb-go/flyteidl/plugins/pytorch.pb.validate.go new file mode 100644 index 000000000..8e6af9852 --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/pytorch.pb.validate.go @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl/plugins/pytorch.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/ptypes" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = ptypes.DynamicAny{} +) + +// define the regex for a UUID once up-front +var _pytorch_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") + +// Validate checks the field values on DistributedPyTorchTrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, an error is returned. +func (m *DistributedPyTorchTrainingTask) Validate() error { + if m == nil { + return nil + } + + // no validation rules for Workers + + return nil +} + +// DistributedPyTorchTrainingTaskValidationError is the validation error +// returned by DistributedPyTorchTrainingTask.Validate if the designated +// constraints aren't met. +type DistributedPyTorchTrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedPyTorchTrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedPyTorchTrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedPyTorchTrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedPyTorchTrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedPyTorchTrainingTaskValidationError) ErrorName() string { + return "DistributedPyTorchTrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedPyTorchTrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedPyTorchTrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedPyTorchTrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedPyTorchTrainingTaskValidationError{} diff --git a/gen/pb-go/flyteidl/plugins/tensorflow.pb.go b/gen/pb-go/flyteidl/plugins/tensorflow.pb.go new file mode 100644 index 000000000..f466c1c97 --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/tensorflow.pb.go @@ -0,0 +1,102 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: flyteidl/plugins/tensorflow.proto + +package plugins + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +type DistributedTensorflowTrainingTask struct { + // number of worker, ps, chief replicas spawned in the cluster for this job + Workers int32 `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"` + // PS -> Parameter server + PsReplicas int32 `protobuf:"varint,2,opt,name=ps_replicas,json=psReplicas,proto3" json:"ps_replicas,omitempty"` + ChiefReplicas int32 `protobuf:"varint,3,opt,name=chief_replicas,json=chiefReplicas,proto3" json:"chief_replicas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributedTensorflowTrainingTask) Reset() { *m = DistributedTensorflowTrainingTask{} } +func (m *DistributedTensorflowTrainingTask) String() string { return proto.CompactTextString(m) } +func (*DistributedTensorflowTrainingTask) ProtoMessage() {} +func (*DistributedTensorflowTrainingTask) Descriptor() ([]byte, []int) { + return fileDescriptor_8da02783614e1bcc, []int{0} +} + +func (m *DistributedTensorflowTrainingTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributedTensorflowTrainingTask.Unmarshal(m, b) +} +func (m *DistributedTensorflowTrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributedTensorflowTrainingTask.Marshal(b, m, deterministic) +} +func (m *DistributedTensorflowTrainingTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributedTensorflowTrainingTask.Merge(m, src) +} +func (m *DistributedTensorflowTrainingTask) XXX_Size() int { + return xxx_messageInfo_DistributedTensorflowTrainingTask.Size(m) +} +func (m *DistributedTensorflowTrainingTask) XXX_DiscardUnknown() { + xxx_messageInfo_DistributedTensorflowTrainingTask.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DistributedTensorflowTrainingTask proto.InternalMessageInfo + +func (m *DistributedTensorflowTrainingTask) GetWorkers() int32 { + if m != nil { + return m.Workers + } + return 0 +} + +func (m *DistributedTensorflowTrainingTask) GetPsReplicas() int32 { + if m != nil { + return m.PsReplicas + } + return 0 +} + +func (m *DistributedTensorflowTrainingTask) GetChiefReplicas() int32 { + if m != nil { + return m.ChiefReplicas + } + return 0 +} + +func init() { + proto.RegisterType((*DistributedTensorflowTrainingTask)(nil), "flyteidl.plugins.DistributedTensorflowTrainingTask") +} + +func init() { proto.RegisterFile("flyteidl/plugins/tensorflow.proto", fileDescriptor_8da02783614e1bcc) } + +var fileDescriptor_8da02783614e1bcc = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcb, 0xa9, 0x2c, + 0x49, 0xcd, 0x4c, 0xc9, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0x49, 0xcd, + 0x2b, 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x80, + 0x29, 0xd1, 0x83, 0x2a, 0x51, 0x6a, 0x65, 0xe4, 0x52, 0x74, 0xc9, 0x2c, 0x2e, 0x29, 0xca, 0x4c, + 0x2a, 0x2d, 0x49, 0x4d, 0x09, 0x81, 0xeb, 0x08, 0x29, 0x4a, 0xcc, 0xcc, 0xcb, 0xcc, 0x4b, 0x0f, + 0x49, 0x2c, 0xce, 0x16, 0x92, 0xe0, 0x62, 0x2f, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x96, 0x60, + 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x82, 0x71, 0x85, 0xe4, 0xb9, 0xb8, 0x0b, 0x8a, 0xe3, 0x8b, 0x52, + 0x0b, 0x72, 0x32, 0x93, 0x13, 0x8b, 0x25, 0x98, 0xc0, 0xb2, 0x5c, 0x05, 0xc5, 0x41, 0x50, 0x11, + 0x21, 0x55, 0x2e, 0xbe, 0xe4, 0x8c, 0xcc, 0xd4, 0x34, 0x84, 0x1a, 0x66, 0xb0, 0x1a, 0x5e, 0xb0, + 0x28, 0x4c, 0x99, 0x93, 0x65, 0x94, 0x79, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, + 0xae, 0x3e, 0xd8, 0x99, 0xf9, 0x45, 0xe9, 0xfa, 0x70, 0x2f, 0xa5, 0xa7, 0xe6, 0xe9, 0x17, 0x24, + 0xe9, 0xa6, 0xe7, 0xeb, 0xa3, 0xfb, 0x32, 0x89, 0x0d, 0xec, 0x37, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8f, 0xf1, 0xb9, 0xb2, 0x00, 0x01, 0x00, 0x00, +} diff --git a/gen/pb-go/flyteidl/plugins/tensorflow.pb.validate.go b/gen/pb-go/flyteidl/plugins/tensorflow.pb.validate.go new file mode 100644 index 000000000..ed7a8eeb8 --- /dev/null +++ b/gen/pb-go/flyteidl/plugins/tensorflow.pb.validate.go @@ -0,0 +1,111 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl/plugins/tensorflow.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/ptypes" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = ptypes.DynamicAny{} +) + +// define the regex for a UUID once up-front +var _tensorflow_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") + +// Validate checks the field values on DistributedTensorflowTrainingTask with +// the rules defined in the proto definition for this message. If any rules +// are violated, an error is returned. 
+func (m *DistributedTensorflowTrainingTask) Validate() error { + if m == nil { + return nil + } + + // no validation rules for Workers + + // no validation rules for PsReplicas + + // no validation rules for ChiefReplicas + + return nil +} + +// DistributedTensorflowTrainingTaskValidationError is the validation error +// returned by DistributedTensorflowTrainingTask.Validate if the designated +// constraints aren't met. +type DistributedTensorflowTrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedTensorflowTrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedTensorflowTrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedTensorflowTrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedTensorflowTrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DistributedTensorflowTrainingTaskValidationError) ErrorName() string { + return "DistributedTensorflowTrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedTensorflowTrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedTensorflowTrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedTensorflowTrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedTensorflowTrainingTaskValidationError{} diff --git a/gen/pb-java/flyteidl/plugins/Mpi.java b/gen/pb-java/flyteidl/plugins/Mpi.java new file mode 100644 index 000000000..8adb21af9 --- /dev/null +++ b/gen/pb-java/flyteidl/plugins/Mpi.java @@ -0,0 +1,737 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/mpi.proto + +package flyteidl.plugins; + +public final class Mpi { + private Mpi() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface DistributedMPITrainingTaskOrBuilder extends + // @@protoc_insertion_point(interface_extends:flyteidl.plugins.DistributedMPITrainingTask) + com.google.protobuf.MessageOrBuilder { + + /** + *
+     * number of workers spawned in the cluster for this job
+     * 
+ * + * int32 num_workers = 1; + */ + int getNumWorkers(); + + /** + *
+     * number of launcher replicas spawned in the cluster for this job
+     * The launcher pod invokes mpirun and communicates with worker pods through MPI.
+     * 
+ * + * int32 num_launcher_replicas = 2; + */ + int getNumLauncherReplicas(); + + /** + *
+     * number of slots per worker used in the hostfile.
+     * The available slots (GPUs) in each pod.
+     * 
+ * + * int32 slots = 3; + */ + int getSlots(); + } + /** + *
+   * MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md
+   * Custom proto for the plugin that enables distributed training using https://github.com/kubeflow/mpi-operator
+   * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedMPITrainingTask} + */ + public static final class DistributedMPITrainingTask extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:flyteidl.plugins.DistributedMPITrainingTask) + DistributedMPITrainingTaskOrBuilder { + private static final long serialVersionUID = 0L; + // Use DistributedMPITrainingTask.newBuilder() to construct. + private DistributedMPITrainingTask(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DistributedMPITrainingTask() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DistributedMPITrainingTask( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + numWorkers_ = input.readInt32(); + break; + } + case 16: { + + numLauncherReplicas_ = input.readInt32(); + break; + } + case 24: { + + slots_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Mpi.internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Mpi.internal_static_flyteidl_plugins_DistributedMPITrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Mpi.DistributedMPITrainingTask.class, flyteidl.plugins.Mpi.DistributedMPITrainingTask.Builder.class); + } + + public static final int NUM_WORKERS_FIELD_NUMBER = 1; + private int numWorkers_; + /** + *
+     * number of workers spawned in the cluster for this job
+     * 
+ * + * int32 num_workers = 1; + */ + public int getNumWorkers() { + return numWorkers_; + } + + public static final int NUM_LAUNCHER_REPLICAS_FIELD_NUMBER = 2; + private int numLauncherReplicas_; + /** + *
+     * number of launcher replicas spawned in the cluster for this job
+     * The launcher pod invokes mpirun and communicates with worker pods through MPI.
+     * 
+ * + * int32 num_launcher_replicas = 2; + */ + public int getNumLauncherReplicas() { + return numLauncherReplicas_; + } + + public static final int SLOTS_FIELD_NUMBER = 3; + private int slots_; + /** + *
+     * number of slots per worker used in the hostfile.
+     * The available slots (GPUs) in each pod.
+     * 
+ * + * int32 slots = 3; + */ + public int getSlots() { + return slots_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (numWorkers_ != 0) { + output.writeInt32(1, numWorkers_); + } + if (numLauncherReplicas_ != 0) { + output.writeInt32(2, numLauncherReplicas_); + } + if (slots_ != 0) { + output.writeInt32(3, slots_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (numWorkers_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, numWorkers_); + } + if (numLauncherReplicas_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, numLauncherReplicas_); + } + if (slots_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, slots_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof flyteidl.plugins.Mpi.DistributedMPITrainingTask)) { + return super.equals(obj); + } + flyteidl.plugins.Mpi.DistributedMPITrainingTask other = (flyteidl.plugins.Mpi.DistributedMPITrainingTask) obj; + + if (getNumWorkers() + != other.getNumWorkers()) return false; + if (getNumLauncherReplicas() + != other.getNumLauncherReplicas()) return false; + if (getSlots() + != other.getSlots()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NUM_WORKERS_FIELD_NUMBER; + hash = (53 * hash) + getNumWorkers(); + hash = (37 * hash) + NUM_LAUNCHER_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getNumLauncherReplicas(); + hash = (37 * hash) + SLOTS_FIELD_NUMBER; + hash = (53 * hash) + getSlots(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom(byte[] 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(flyteidl.plugins.Mpi.DistributedMPITrainingTask prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md
+     * Custom proto for the plugin that enables distributed training using https://github.com/kubeflow/mpi-operator
+     * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedMPITrainingTask} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:flyteidl.plugins.DistributedMPITrainingTask) + flyteidl.plugins.Mpi.DistributedMPITrainingTaskOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Mpi.internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Mpi.internal_static_flyteidl_plugins_DistributedMPITrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Mpi.DistributedMPITrainingTask.class, flyteidl.plugins.Mpi.DistributedMPITrainingTask.Builder.class); + } + + // Construct using flyteidl.plugins.Mpi.DistributedMPITrainingTask.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + numWorkers_ = 0; + + numLauncherReplicas_ = 0; + + slots_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return flyteidl.plugins.Mpi.internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor; + } + + @java.lang.Override + public flyteidl.plugins.Mpi.DistributedMPITrainingTask getDefaultInstanceForType() { + return flyteidl.plugins.Mpi.DistributedMPITrainingTask.getDefaultInstance(); + } + + @java.lang.Override + public flyteidl.plugins.Mpi.DistributedMPITrainingTask build() { + flyteidl.plugins.Mpi.DistributedMPITrainingTask result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public flyteidl.plugins.Mpi.DistributedMPITrainingTask buildPartial() { + flyteidl.plugins.Mpi.DistributedMPITrainingTask result = new flyteidl.plugins.Mpi.DistributedMPITrainingTask(this); + result.numWorkers_ = numWorkers_; + result.numLauncherReplicas_ = numLauncherReplicas_; + result.slots_ = slots_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof flyteidl.plugins.Mpi.DistributedMPITrainingTask) { + return mergeFrom((flyteidl.plugins.Mpi.DistributedMPITrainingTask)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(flyteidl.plugins.Mpi.DistributedMPITrainingTask other) { + if (other == flyteidl.plugins.Mpi.DistributedMPITrainingTask.getDefaultInstance()) return this; + if (other.getNumWorkers() != 0) { + setNumWorkers(other.getNumWorkers()); + } + if (other.getNumLauncherReplicas() != 0) { + setNumLauncherReplicas(other.getNumLauncherReplicas()); + } + if (other.getSlots() != 0) { + setSlots(other.getSlots()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + flyteidl.plugins.Mpi.DistributedMPITrainingTask parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (flyteidl.plugins.Mpi.DistributedMPITrainingTask) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int numWorkers_ ; + /** + *
+       * number of workers spawned in the cluster for this job
+       * 
+ * + * int32 num_workers = 1; + */ + public int getNumWorkers() { + return numWorkers_; + } + /** + *
+       * number of workers spawned in the cluster for this job
+       * 
+ * + * int32 num_workers = 1; + */ + public Builder setNumWorkers(int value) { + + numWorkers_ = value; + onChanged(); + return this; + } + /** + *
+       * number of workers spawned in the cluster for this job
+       * 
+ * + * int32 num_workers = 1; + */ + public Builder clearNumWorkers() { + + numWorkers_ = 0; + onChanged(); + return this; + } + + private int numLauncherReplicas_ ; + /** + *
+       * number of launcher replicas spawned in the cluster for this job
+       * The launcher pod invokes mpirun and communicates with worker pods through MPI.
+       * 
+ * + * int32 num_launcher_replicas = 2; + */ + public int getNumLauncherReplicas() { + return numLauncherReplicas_; + } + /** + *
+       * number of launcher replicas spawned in the cluster for this job
+       * The launcher pod invokes mpirun and communicates with worker pods through MPI.
+       * 
+ * + * int32 num_launcher_replicas = 2; + */ + public Builder setNumLauncherReplicas(int value) { + + numLauncherReplicas_ = value; + onChanged(); + return this; + } + /** + *
+       * number of launcher replicas spawned in the cluster for this job
+       * The launcher pod invokes mpirun and communicates with worker pods through MPI.
+       * 
+ * + * int32 num_launcher_replicas = 2; + */ + public Builder clearNumLauncherReplicas() { + + numLauncherReplicas_ = 0; + onChanged(); + return this; + } + + private int slots_ ; + /** + *
+       * number of slots per worker used in the hostfile.
+       * The available slots (GPUs) in each pod.
+       * 
+ * + * int32 slots = 3; + */ + public int getSlots() { + return slots_; + } + /** + *
+       * number of slots per worker used in the hostfile.
+       * The available slots (GPUs) in each pod.
+       * 
+ * + * int32 slots = 3; + */ + public Builder setSlots(int value) { + + slots_ = value; + onChanged(); + return this; + } + /** + *
+       * number of slots per worker used in the hostfile.
+       * The available slots (GPUs) in each pod.
+       * 
+ * + * int32 slots = 3; + */ + public Builder clearSlots() { + + slots_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:flyteidl.plugins.DistributedMPITrainingTask) + } + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedMPITrainingTask) + private static final flyteidl.plugins.Mpi.DistributedMPITrainingTask DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new flyteidl.plugins.Mpi.DistributedMPITrainingTask(); + } + + public static flyteidl.plugins.Mpi.DistributedMPITrainingTask getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DistributedMPITrainingTask parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DistributedMPITrainingTask(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public flyteidl.plugins.Mpi.DistributedMPITrainingTask getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_flyteidl_plugins_DistributedMPITrainingTask_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\032flyteidl/plugins/mpi.proto\022\020flyteidl.p" + + "lugins\"_\n\032DistributedMPITrainingTask\022\023\n\013" + + "num_workers\030\001 \001(\005\022\035\n\025num_launcher_replic" + + "as\030\002 \001(\005\022\r\n\005slots\030\003 \001(\005B9Z7github.com/fl" + + "yteorg/flyteidl/gen/pb-go/flyteidl/plugi" + + "nsb\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_flyteidl_plugins_DistributedMPITrainingTask_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_flyteidl_plugins_DistributedMPITrainingTask_descriptor, + new java.lang.String[] { "NumWorkers", "NumLauncherReplicas", "Slots", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/gen/pb-java/flyteidl/plugins/Pytorch.java b/gen/pb-java/flyteidl/plugins/Pytorch.java new file mode 100644 index 000000000..a7709263f --- /dev/null +++ b/gen/pb-java/flyteidl/plugins/Pytorch.java @@ -0,0 +1,560 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/pytorch.proto + +package flyteidl.plugins; + +public final class Pytorch { + private Pytorch() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface DistributedPyTorchTrainingTaskOrBuilder extends + // @@protoc_insertion_point(interface_extends:flyteidl.plugins.DistributedPyTorchTrainingTask) + com.google.protobuf.MessageOrBuilder { + + /** + *
+     * number of worker replicas spawned in the cluster for this job
+     * 
+     *
+     * int32 workers = 1;
+     */
+    int getWorkers();
+  }
+  /**
+   *
+   * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator
+   * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedPyTorchTrainingTask} + */ + public static final class DistributedPyTorchTrainingTask extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:flyteidl.plugins.DistributedPyTorchTrainingTask) + DistributedPyTorchTrainingTaskOrBuilder { + private static final long serialVersionUID = 0L; + // Use DistributedPyTorchTrainingTask.newBuilder() to construct. + private DistributedPyTorchTrainingTask(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DistributedPyTorchTrainingTask() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DistributedPyTorchTrainingTask( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + workers_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Pytorch.internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Pytorch.internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.class, flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.Builder.class); + } + + public static final int WORKERS_FIELD_NUMBER = 1; + private int workers_; + /** + *
+     * number of worker replicas spawned in the cluster for this job
+     * 
+ * + * int32 workers = 1; + */ + public int getWorkers() { + return workers_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (workers_ != 0) { + output.writeInt32(1, workers_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (workers_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, workers_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask)) { + return super.equals(obj); + } + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask other = (flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask) obj; + + if (getWorkers() + != other.getWorkers()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + getWorkers(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator
+     * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedPyTorchTrainingTask} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:flyteidl.plugins.DistributedPyTorchTrainingTask) + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTaskOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Pytorch.internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Pytorch.internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.class, flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.Builder.class); + } + + // Construct using flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + workers_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return flyteidl.plugins.Pytorch.internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor; + } + + @java.lang.Override + public flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask getDefaultInstanceForType() { + return flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.getDefaultInstance(); + } + + @java.lang.Override + public flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask build() { + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask buildPartial() { + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask result = new flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask(this); + result.workers_ = workers_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask) { + return mergeFrom((flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask other) { + if (other == flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask.getDefaultInstance()) return this; + if (other.getWorkers() != 0) { + setWorkers(other.getWorkers()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int workers_ ; + /** + *
+       * number of worker replicas spawned in the cluster for this job
+       * 
+       *
+       * int32 workers = 1;
+       */
+      public int getWorkers() {
+        return workers_;
+      }
+      /**
+       *
+       * number of worker replicas spawned in the cluster for this job
+       * 
+       *
+       * int32 workers = 1;
+       */
+      public Builder setWorkers(int value) {
+
+        workers_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       *
+       * number of worker replicas spawned in the cluster for this job
+       * 
+ * + * int32 workers = 1; + */ + public Builder clearWorkers() { + + workers_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:flyteidl.plugins.DistributedPyTorchTrainingTask) + } + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedPyTorchTrainingTask) + private static final flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask(); + } + + public static flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DistributedPyTorchTrainingTask parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DistributedPyTorchTrainingTask(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public flyteidl.plugins.Pytorch.DistributedPyTorchTrainingTask getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\036flyteidl/plugins/pytorch.proto\022\020flytei" + + "dl.plugins\"1\n\036DistributedPyTorchTraining" + + "Task\022\017\n\007workers\030\001 \001(\005B9Z7github.com/flyt" + + "eorg/flyteidl/gen/pb-go/flyteidl/plugins" + + "b\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_flyteidl_plugins_DistributedPyTorchTrainingTask_descriptor, + new java.lang.String[] { "Workers", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/gen/pb-java/flyteidl/plugins/Tensorflow.java b/gen/pb-java/flyteidl/plugins/Tensorflow.java new file mode 100644 index 000000000..b1a1862ae --- /dev/null +++ b/gen/pb-java/flyteidl/plugins/Tensorflow.java @@ -0,0 +1,705 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flyteidl/plugins/tensorflow.proto + +package flyteidl.plugins; + +public final class Tensorflow { + private Tensorflow() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface DistributedTensorflowTrainingTaskOrBuilder extends + // @@protoc_insertion_point(interface_extends:flyteidl.plugins.DistributedTensorflowTrainingTask) + com.google.protobuf.MessageOrBuilder { + + /** + *
+     * number of worker, ps, chief replicas spawned in the cluster for this job
+     * 
+     *
+     * int32 workers = 1;
+     */
+    int getWorkers();
+
+    /**
+     *
+     * PS -> Parameter server
+     * 
+     *
+     * int32 ps_replicas = 2;
+     */
+    int getPsReplicas();
+
+    /**
+     * int32 chief_replicas = 3;
+     */
+    int getChiefReplicas();
+  }
+  /**
+   *
+   * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator
+   * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedTensorflowTrainingTask} + */ + public static final class DistributedTensorflowTrainingTask extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:flyteidl.plugins.DistributedTensorflowTrainingTask) + DistributedTensorflowTrainingTaskOrBuilder { + private static final long serialVersionUID = 0L; + // Use DistributedTensorflowTrainingTask.newBuilder() to construct. + private DistributedTensorflowTrainingTask(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DistributedTensorflowTrainingTask() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DistributedTensorflowTrainingTask( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + workers_ = input.readInt32(); + break; + } + case 16: { + + psReplicas_ = input.readInt32(); + break; + } + case 24: { + + chiefReplicas_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Tensorflow.internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Tensorflow.internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.class, flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.Builder.class); + } + + public static final int WORKERS_FIELD_NUMBER = 1; + private int workers_; + /** + *
+     * number of worker, ps, chief replicas spawned in the cluster for this job
+     * 
+     *
+     * int32 workers = 1;
+     */
+    public int getWorkers() {
+      return workers_;
+    }
+
+    public static final int PS_REPLICAS_FIELD_NUMBER = 2;
+    private int psReplicas_;
+    /**
+     *
+     * PS -> Parameter server
+     * 
+ * + * int32 ps_replicas = 2; + */ + public int getPsReplicas() { + return psReplicas_; + } + + public static final int CHIEF_REPLICAS_FIELD_NUMBER = 3; + private int chiefReplicas_; + /** + * int32 chief_replicas = 3; + */ + public int getChiefReplicas() { + return chiefReplicas_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (workers_ != 0) { + output.writeInt32(1, workers_); + } + if (psReplicas_ != 0) { + output.writeInt32(2, psReplicas_); + } + if (chiefReplicas_ != 0) { + output.writeInt32(3, chiefReplicas_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (workers_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, workers_); + } + if (psReplicas_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, psReplicas_); + } + if (chiefReplicas_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, chiefReplicas_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask)) { + return super.equals(obj); + } + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask other = (flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask) obj; + + if (getWorkers() + != other.getWorkers()) return false; + if (getPsReplicas() + != other.getPsReplicas()) return false; + if (getChiefReplicas() + != other.getChiefReplicas()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + getWorkers(); + hash = (37 * hash) + PS_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getPsReplicas(); + hash = (37 * hash) + CHIEF_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getChiefReplicas(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator
+     * 
+ * + * Protobuf type {@code flyteidl.plugins.DistributedTensorflowTrainingTask} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:flyteidl.plugins.DistributedTensorflowTrainingTask) + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTaskOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return flyteidl.plugins.Tensorflow.internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return flyteidl.plugins.Tensorflow.internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_fieldAccessorTable + .ensureFieldAccessorsInitialized( + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.class, flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.Builder.class); + } + + // Construct using flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + workers_ = 0; + + psReplicas_ = 0; + + chiefReplicas_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return flyteidl.plugins.Tensorflow.internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor; + } + + @java.lang.Override + public flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask getDefaultInstanceForType() { + return flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.getDefaultInstance(); + } + + @java.lang.Override + public flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask build() { + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask buildPartial() { + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask result = new flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask(this); + result.workers_ = workers_; + result.psReplicas_ = psReplicas_; + result.chiefReplicas_ = chiefReplicas_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder 
addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask) { + return mergeFrom((flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask other) { + if (other == flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask.getDefaultInstance()) return this; + if (other.getWorkers() != 0) { + setWorkers(other.getWorkers()); + } + if (other.getPsReplicas() != 0) { + setPsReplicas(other.getPsReplicas()); + } + if (other.getChiefReplicas() != 0) { + setChiefReplicas(other.getChiefReplicas()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int workers_ ; + /** + *
+       * number of worker, ps, chief replicas spawned in the cluster for this job
+       * 
+       *
+       * int32 workers = 1;
+       */
+      public int getWorkers() {
+        return workers_;
+      }
+      /**
+       *
+       * number of worker, ps, chief replicas spawned in the cluster for this job
+       * 
+       *
+       * int32 workers = 1;
+       */
+      public Builder setWorkers(int value) {
+
+        workers_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       *
+       * number of worker, ps, chief replicas spawned in the cluster for this job
+       * 
+       *
+       * int32 workers = 1;
+       */
+      public Builder clearWorkers() {
+
+        workers_ = 0;
+        onChanged();
+        return this;
+      }
+
+      private int psReplicas_ ;
+      /**
+       *
+       * PS -> Parameter server
+       * 
+       *
+       * int32 ps_replicas = 2;
+       */
+      public int getPsReplicas() {
+        return psReplicas_;
+      }
+      /**
+       *
+       * PS -> Parameter server
+       * 
+       *
+       * int32 ps_replicas = 2;
+       */
+      public Builder setPsReplicas(int value) {
+
+        psReplicas_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       *
+       * PS -> Parameter server
+       * 
+ * + * int32 ps_replicas = 2; + */ + public Builder clearPsReplicas() { + + psReplicas_ = 0; + onChanged(); + return this; + } + + private int chiefReplicas_ ; + /** + * int32 chief_replicas = 3; + */ + public int getChiefReplicas() { + return chiefReplicas_; + } + /** + * int32 chief_replicas = 3; + */ + public Builder setChiefReplicas(int value) { + + chiefReplicas_ = value; + onChanged(); + return this; + } + /** + * int32 chief_replicas = 3; + */ + public Builder clearChiefReplicas() { + + chiefReplicas_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:flyteidl.plugins.DistributedTensorflowTrainingTask) + } + + // @@protoc_insertion_point(class_scope:flyteidl.plugins.DistributedTensorflowTrainingTask) + private static final flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask(); + } + + public static flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DistributedTensorflowTrainingTask parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DistributedTensorflowTrainingTask(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public flyteidl.plugins.Tensorflow.DistributedTensorflowTrainingTask getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n!flyteidl/plugins/tensorflow.proto\022\020fly" + + "teidl.plugins\"a\n!DistributedTensorflowTr" + + "ainingTask\022\017\n\007workers\030\001 \001(\005\022\023\n\013ps_replic" + + "as\030\002 \001(\005\022\026\n\016chief_replicas\030\003 \001(\005B9Z7gith" + + "ub.com/flyteorg/flyteidl/gen/pb-go/flyte" + + "idl/pluginsb\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_flyteidl_plugins_DistributedTensorflowTrainingTask_descriptor, + new java.lang.String[] { "Workers", "PsReplicas", "ChiefReplicas", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/gen/pb-java/flyteidl/plugins/kubeflow/Common.java b/gen/pb-java/flyteidl/plugins/kubeflow/Common.java index 33109bb8c..7abca5396 100644 --- a/gen/pb-java/flyteidl/plugins/kubeflow/Common.java +++ b/gen/pb-java/flyteidl/plugins/kubeflow/Common.java @@ -14,144 +14,62 @@ public static void registerAllExtensions( registerAllExtensions( (com.google.protobuf.ExtensionRegistryLite) registry); } - /** - * Protobuf enum {@code flyteidl.plugins.kubeflow.SuccessPolicy} - */ - public enum SuccessPolicy - implements com.google.protobuf.ProtocolMessageEnum { - /** - * SUCCESS_POLICY_DEFAULT = 0; - */ - SUCCESS_POLICY_DEFAULT(0), - /** - * SUCCESS_POLICY_ALL_WORKERS = 1; - */ - SUCCESS_POLICY_ALL_WORKERS(1), - UNRECOGNIZED(-1), - ; - - /** - * SUCCESS_POLICY_DEFAULT = 0; - */ - public static final int SUCCESS_POLICY_DEFAULT_VALUE = 0; - /** - * SUCCESS_POLICY_ALL_WORKERS = 1; - */ - public static final int SUCCESS_POLICY_ALL_WORKERS_VALUE = 1; - - - public final int getNumber() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalArgumentException( - "Can't get the number of an unknown enum value."); - } - return value; - } - - /** - * @deprecated Use {@link #forNumber(int)} instead. 
- */ - @java.lang.Deprecated - public static SuccessPolicy valueOf(int value) { - return forNumber(value); - } - - public static SuccessPolicy forNumber(int value) { - switch (value) { - case 0: return SUCCESS_POLICY_DEFAULT; - case 1: return SUCCESS_POLICY_ALL_WORKERS; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static final com.google.protobuf.Internal.EnumLiteMap< - SuccessPolicy> internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public SuccessPolicy findValueByNumber(int number) { - return SuccessPolicy.forNumber(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(ordinal()); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return flyteidl.plugins.kubeflow.Common.getDescriptor().getEnumTypes().get(0); - } - - private static final SuccessPolicy[] VALUES = values(); - - public static SuccessPolicy valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - if (desc.getIndex() == -1) { - return UNRECOGNIZED; - } - return VALUES[desc.getIndex()]; - } - - private final int value; - - private SuccessPolicy(int value) { - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:flyteidl.plugins.kubeflow.SuccessPolicy) - } - /** * Protobuf enum {@code flyteidl.plugins.kubeflow.CleanPodPolicy} */ public enum CleanPodPolicy implements com.google.protobuf.ProtocolMessageEnum { /** - * CLEANPOD_POLICY_UNDEFINED = 0; - */ - CLEANPOD_POLICY_UNDEFINED(0), - /** - * CLEANPOD_POLICY_ALL = 1; + *
+     * The All policy means all pods even completed pods will be deleted immediately when the job finishes.
+     * 
+     *
+     * CLEANPOD_POLICY_ALL = 0;
      */
-    CLEANPOD_POLICY_ALL(1),
+    CLEANPOD_POLICY_ALL(0),
     /**
-     * CLEANPOD_POLICY_RUNNING = 2;
+     *
+     * The Running policy means that only pods still running when a job completes (e.g. parameter servers) will be deleted immediately; completed pods will not be deleted so that the logs will be preserved. This is the default value.
+     * 
+     *
+     * CLEANPOD_POLICY_RUNNING = 1;
      */
-    CLEANPOD_POLICY_RUNNING(2),
+    CLEANPOD_POLICY_RUNNING(1),
     /**
-     * CLEANPOD_POLICY_NONE = 3;
+     *
+     * The None policy means that no pods will be deleted when the job completes.
+     * 
+     *
+     * CLEANPOD_POLICY_NONE = 2;
      */
-    CLEANPOD_POLICY_NONE(3),
+    CLEANPOD_POLICY_NONE(2),
     UNRECOGNIZED(-1),
     ;
 
     /**
-     * CLEANPOD_POLICY_UNDEFINED = 0;
-     */
-    public static final int CLEANPOD_POLICY_UNDEFINED_VALUE = 0;
-    /**
-     * CLEANPOD_POLICY_ALL = 1;
+     *
+     * The All policy means all pods even completed pods will be deleted immediately when the job finishes.
+     * 
+     *
+     * CLEANPOD_POLICY_ALL = 0;
      */
-    public static final int CLEANPOD_POLICY_ALL_VALUE = 1;
+    public static final int CLEANPOD_POLICY_ALL_VALUE = 0;
     /**
-     * CLEANPOD_POLICY_RUNNING = 2;
+     *
+     * The Running policy means that only pods still running when a job completes (e.g. parameter servers) will be deleted immediately; completed pods will not be deleted so that the logs will be preserved. This is the default value.
+     * 
+     *
+     * CLEANPOD_POLICY_RUNNING = 1;
      */
-    public static final int CLEANPOD_POLICY_RUNNING_VALUE = 2;
+    public static final int CLEANPOD_POLICY_RUNNING_VALUE = 1;
     /**
-     * CLEANPOD_POLICY_NONE = 3;
+     *
+     * The None policy means that no pods will be deleted when the job completes.
+     * 
+ * + * CLEANPOD_POLICY_NONE = 2; */ - public static final int CLEANPOD_POLICY_NONE_VALUE = 3; + public static final int CLEANPOD_POLICY_NONE_VALUE = 2; public final int getNumber() { @@ -172,10 +90,9 @@ public static CleanPodPolicy valueOf(int value) { public static CleanPodPolicy forNumber(int value) { switch (value) { - case 0: return CLEANPOD_POLICY_UNDEFINED; - case 1: return CLEANPOD_POLICY_ALL; - case 2: return CLEANPOD_POLICY_RUNNING; - case 3: return CLEANPOD_POLICY_NONE; + case 0: return CLEANPOD_POLICY_ALL; + case 1: return CLEANPOD_POLICY_RUNNING; + case 2: return CLEANPOD_POLICY_NONE; default: return null; } } @@ -202,7 +119,7 @@ public CleanPodPolicy findValueByNumber(int number) { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return flyteidl.plugins.kubeflow.Common.getDescriptor().getEnumTypes().get(1); + return flyteidl.plugins.kubeflow.Common.getDescriptor().getEnumTypes().get(0); } private static final CleanPodPolicy[] VALUES = values(); @@ -343,7 +260,7 @@ public RestartPolicy findValueByNumber(int number) { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return flyteidl.plugins.kubeflow.Common.getDescriptor().getEnumTypes().get(2); + return flyteidl.plugins.kubeflow.Common.getDescriptor().getEnumTypes().get(1); } private static final RestartPolicy[] VALUES = values(); @@ -375,8 +292,7 @@ public interface RunPolicyOrBuilder extends /** *
-     * CleanPodPolicy defines the policy to kill pods after the job completes.
-     * Default to None.
+     * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
      * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -384,8 +300,7 @@ public interface RunPolicyOrBuilder extends int getCleanPodPolicyValue(); /** *
-     * CleanPodPolicy defines the policy to kill pods after the job completes.
-     * Default to None.
+     * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
      * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -394,8 +309,7 @@ public interface RunPolicyOrBuilder extends /** *
-     * TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since
-     * reconcile gets called periodically. Default to infinite.
+     * TTL to clean up jobs. Default to infinite.
      * 
* * int32 ttl_seconds_after_finished = 2; @@ -518,8 +432,7 @@ private RunPolicy( private int cleanPodPolicy_; /** *
-     * CleanPodPolicy defines the policy to kill pods after the job completes.
-     * Default to None.
+     * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
      * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -529,8 +442,7 @@ public int getCleanPodPolicyValue() { } /** *
-     * CleanPodPolicy defines the policy to kill pods after the job completes.
-     * Default to None.
+     * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
      * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -545,8 +457,7 @@ public flyteidl.plugins.kubeflow.Common.CleanPodPolicy getCleanPodPolicy() { private int ttlSecondsAfterFinished_; /** *
-     * TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since
-     * reconcile gets called periodically. Default to infinite.
+     * TTL to clean up jobs. Default to infinite.
      * 
* * int32 ttl_seconds_after_finished = 2; @@ -596,7 +507,7 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (cleanPodPolicy_ != flyteidl.plugins.kubeflow.Common.CleanPodPolicy.CLEANPOD_POLICY_UNDEFINED.getNumber()) { + if (cleanPodPolicy_ != flyteidl.plugins.kubeflow.Common.CleanPodPolicy.CLEANPOD_POLICY_ALL.getNumber()) { output.writeEnum(1, cleanPodPolicy_); } if (ttlSecondsAfterFinished_ != 0) { @@ -617,7 +528,7 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (cleanPodPolicy_ != flyteidl.plugins.kubeflow.Common.CleanPodPolicy.CLEANPOD_POLICY_UNDEFINED.getNumber()) { + if (cleanPodPolicy_ != flyteidl.plugins.kubeflow.Common.CleanPodPolicy.CLEANPOD_POLICY_ALL.getNumber()) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, cleanPodPolicy_); } @@ -937,8 +848,7 @@ public Builder mergeFrom( private int cleanPodPolicy_ = 0; /** *
-       * CleanPodPolicy defines the policy to kill pods after the job completes.
-       * Default to None.
+       * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
        * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -948,8 +858,7 @@ public int getCleanPodPolicyValue() { } /** *
-       * CleanPodPolicy defines the policy to kill pods after the job completes.
-       * Default to None.
+       * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
        * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -961,8 +870,7 @@ public Builder setCleanPodPolicyValue(int value) { } /** *
-       * CleanPodPolicy defines the policy to kill pods after the job completes.
-       * Default to None.
+       * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
        * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -974,8 +882,7 @@ public flyteidl.plugins.kubeflow.Common.CleanPodPolicy getCleanPodPolicy() { } /** *
-       * CleanPodPolicy defines the policy to kill pods after the job completes.
-       * Default to None.
+       * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
        * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -991,8 +898,7 @@ public Builder setCleanPodPolicy(flyteidl.plugins.kubeflow.Common.CleanPodPolicy } /** *
-       * CleanPodPolicy defines the policy to kill pods after the job completes.
-       * Default to None.
+       * CleanPodPolicy defines the policy to kill pods after the job completes. Default to None.
        * 
* * .flyteidl.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; @@ -1007,8 +913,7 @@ public Builder clearCleanPodPolicy() { private int ttlSecondsAfterFinished_ ; /** *
-       * TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since
-       * reconcile gets called periodically. Default to infinite.
+       * TTL to clean up jobs. Default to infinite.
        * 
* * int32 ttl_seconds_after_finished = 2; @@ -1018,8 +923,7 @@ public int getTtlSecondsAfterFinished() { } /** *
-       * TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since
-       * reconcile gets called periodically. Default to infinite.
+       * TTL to clean up jobs. Default to infinite.
        * 
* * int32 ttl_seconds_after_finished = 2; @@ -1032,8 +936,7 @@ public Builder setTtlSecondsAfterFinished(int value) { } /** *
-       * TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since
-       * reconcile gets called periodically. Default to infinite.
+       * TTL to clean up jobs. Default to infinite.
        * 
* * int32 ttl_seconds_after_finished = 2; @@ -1196,16 +1099,13 @@ public flyteidl.plugins.kubeflow.Common.RunPolicy getDefaultInstanceForType() { "lugins.kubeflow.CleanPodPolicy\022\"\n\032ttl_se" + "conds_after_finished\030\002 \001(\005\022\035\n\025activeDead" + "lineSeconds\030\003 \001(\005\022\025\n\rbackoff_limit\030\004 \001(\005" + - "*K\n\rSuccessPolicy\022\032\n\026SUCCESS_POLICY_DEFA" + - "ULT\020\000\022\036\n\032SUCCESS_POLICY_ALL_WORKERS\020\001*\177\n" + - "\016CleanPodPolicy\022\035\n\031CLEANPOD_POLICY_UNDEF" + - "INED\020\000\022\027\n\023CLEANPOD_POLICY_ALL\020\001\022\033\n\027CLEAN" + - "POD_POLICY_RUNNING\020\002\022\030\n\024CLEANPOD_POLICY_" + - "NONE\020\003*c\n\rRestartPolicy\022\031\n\025RESTART_POLIC" + - "Y_ALWAYS\020\000\022\035\n\031RESTART_POLICY_ON_FAILURE\020" + - "\001\022\030\n\024RESTART_POLICY_NEVER\020\002B9Z7github.co" + - "m/flyteorg/flyteidl/gen/pb-go/flyteidl/p" + - "luginsb\006proto3" + "*`\n\016CleanPodPolicy\022\027\n\023CLEANPOD_POLICY_AL" + + "L\020\000\022\033\n\027CLEANPOD_POLICY_RUNNING\020\001\022\030\n\024CLEA" + + "NPOD_POLICY_NONE\020\002*c\n\rRestartPolicy\022\031\n\025R" + + "ESTART_POLICY_ALWAYS\020\000\022\035\n\031RESTART_POLICY" + + "_ON_FAILURE\020\001\022\030\n\024RESTART_POLICY_NEVER\020\002B" + + "9Z7github.com/flyteorg/flyteidl/gen/pb-g" + + "o/flyteidl/pluginsb\006proto3" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { diff --git a/gen/pb-java/flyteidl/plugins/kubeflow/Mpi.java b/gen/pb-java/flyteidl/plugins/kubeflow/Mpi.java index 6da5191f0..548266e03 100644 --- a/gen/pb-java/flyteidl/plugins/kubeflow/Mpi.java +++ b/gen/pb-java/flyteidl/plugins/kubeflow/Mpi.java @@ -98,23 +98,6 @@ public interface DistributedMPITrainingTaskOrBuilder extends * .flyteidl.plugins.kubeflow.RunPolicy run_policy = 3; */ flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder(); - - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - int getSuccessPolicyValue(); - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy(); } /** *
@@ -133,7 +116,6 @@ private DistributedMPITrainingTask(com.google.protobuf.GeneratedMessageV3.Builde
       super(builder);
     }
     private DistributedMPITrainingTask() {
-      successPolicy_ = 0;
     }
 
     @java.lang.Override
@@ -199,12 +181,6 @@ private DistributedMPITrainingTask(
 
               break;
             }
-            case 32: {
-              int rawValue = input.readEnum();
-
-              successPolicy_ = rawValue;
-              break;
-            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
@@ -342,31 +318,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder
       return getRunPolicy();
     }
 
-    public static final int SUCCESS_POLICY_FIELD_NUMBER = 4;
-    private int successPolicy_;
-    /**
-     * 
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -390,9 +341,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (runPolicy_ != null) { output.writeMessage(3, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - output.writeEnum(4, successPolicy_); - } unknownFields.writeTo(output); } @@ -414,10 +362,6 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, successPolicy_); - } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -448,7 +392,6 @@ public boolean equals(final java.lang.Object obj) { if (!getRunPolicy() .equals(other.getRunPolicy())) return false; } - if (successPolicy_ != other.successPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -472,8 +415,6 @@ public int hashCode() { hash = (37 * hash) + RUN_POLICY_FIELD_NUMBER; hash = (53 * hash) + getRunPolicy().hashCode(); } - hash = (37 * hash) + SUCCESS_POLICY_FIELD_NUMBER; - hash = (53 * hash) + successPolicy_; hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -629,8 +570,6 @@ public Builder clear() { runPolicy_ = null; runPolicyBuilder_ = null; } - successPolicy_ = 0; - return this; } @@ -672,7 +611,6 @@ public flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingTask buildPartial() { } else { result.runPolicy_ = runPolicyBuilder_.build(); } - result.successPolicy_ = successPolicy_; onBuilt(); return result; } @@ -730,9 +668,6 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingTas if (other.hasRunPolicy()) { mergeRunPolicy(other.getRunPolicy()); } - if (other.successPolicy_ != 0) { - setSuccessPolicyValue(other.getSuccessPolicyValue()); - } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1238,71 +1173,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder } return runPolicyBuilder_; } - - private int successPolicy_ = 0; - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder setSuccessPolicyValue(int value) { - successPolicy_ = value; - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder setSuccessPolicy(flyteidl.plugins.kubeflow.Common.SuccessPolicy value) { - if (value == null) { - throw new NullPointerException(); - } - - successPolicy_ = value.getNumber(); - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder clearSuccessPolicy() { - - successPolicy_ = 0; - onChanged(); - return this; - } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -1362,7 +1232,7 @@ public interface DistributedMPITrainingReplicaSpecOrBuilder extends /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1371,38 +1241,61 @@ public interface DistributedMPITrainingReplicaSpecOrBuilder extends /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - java.lang.String getPodTemplateName(); + java.lang.String getImage(); /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ com.google.protobuf.ByteString - getPodTemplateNameBytes(); + getImageBytes(); /** *
-     * Restart policy for the worker
+     * Resources required for the replica group
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.core.Resources resources = 3; + */ + boolean hasResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.Resources getResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder(); + + /** + *
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
+     * 
+ * + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ int getRestartPolicyValue(); /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy(); } @@ -1419,7 +1312,7 @@ private DistributedMPITrainingReplicaSpec(com.google.protobuf.GeneratedMessageV3 super(builder); } private DistributedMPITrainingReplicaSpec() { - podTemplateName_ = ""; + image_ = ""; restartPolicy_ = 0; } @@ -1455,10 +1348,23 @@ private DistributedMPITrainingReplicaSpec( case 18: { java.lang.String s = input.readStringRequireUtf8(); - podTemplateName_ = s; + image_ = s; break; } - case 24: { + case 26: { + flyteidl.core.Tasks.Resources.Builder subBuilder = null; + if (resources_ != null) { + subBuilder = resources_.toBuilder(); + } + resources_ = input.readMessage(flyteidl.core.Tasks.Resources.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(resources_); + resources_ = subBuilder.buildPartial(); + } + + break; + } + case 32: { int rawValue = input.readEnum(); restartPolicy_ = rawValue; @@ -1500,7 +1406,7 @@ private DistributedMPITrainingReplicaSpec( private int replicas_; /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1509,68 +1415,99 @@ public int getReplicas() { return replicas_; } - public static final int POD_TEMPLATE_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object podTemplateName_; + public static final int IMAGE_FIELD_NUMBER = 2; + private volatile java.lang.Object image_; /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } } /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int RESTART_POLICY_FIELD_NUMBER = 3; + public static final int RESOURCES_FIELD_NUMBER = 3; + private flyteidl.core.Tasks.Resources resources_; + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resources_ != null; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + return getResources(); + } + + public static final int RESTART_POLICY_FIELD_NUMBER = 4; private int restartPolicy_; /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -1595,11 +1532,14 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (replicas_ != 0) { output.writeInt32(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, image_); + } + if (resources_ != null) { + output.writeMessage(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { - output.writeEnum(3, restartPolicy_); + output.writeEnum(4, restartPolicy_); } unknownFields.writeTo(output); } @@ -1614,12 +1554,16 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, image_); + } + if (resources_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, restartPolicy_); + .computeEnumSize(4, restartPolicy_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -1638,8 +1582,13 @@ public boolean equals(final java.lang.Object obj) { if (getReplicas() != other.getReplicas()) return false; - if (!getPodTemplateName() - .equals(other.getPodTemplateName())) return false; + if (!getImage() + .equals(other.getImage())) return false; + if (hasResources() != other.hasResources()) return false; + if (hasResources()) { + if (!getResources() + .equals(other.getResources())) return false; + } if (restartPolicy_ != other.restartPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; @@ -1654,8 +1603,12 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + REPLICAS_FIELD_NUMBER; hash = (53 * hash) + getReplicas(); - hash = (37 * hash) + POD_TEMPLATE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getPodTemplateName().hashCode(); + hash = (37 * hash) + IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getImage().hashCode(); + if (hasResources()) { + hash = (37 * hash) + RESOURCES_FIELD_NUMBER; + hash = (53 * hash) + getResources().hashCode(); + } hash = (37 * hash) + RESTART_POLICY_FIELD_NUMBER; hash = (53 * hash) + restartPolicy_; hash = (29 * hash) + unknownFields.hashCode(); @@ -1793,8 +1746,14 @@ public Builder clear() { super.clear(); replicas_ = 0; - podTemplateName_ = ""; + image_ = ""; + if (resourcesBuilder_ == null) { + resources_ = null; + } else { + resources_ = null; + resourcesBuilder_ = null; + } restartPolicy_ = 0; return this; @@ -1824,7 +1783,12 @@ public flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingReplicaSpec build() { public flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingReplicaSpec buildPartial() { flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingReplicaSpec result = new flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingReplicaSpec(this); 
result.replicas_ = replicas_; - result.podTemplateName_ = podTemplateName_; + result.image_ = image_; + if (resourcesBuilder_ == null) { + result.resources_ = resources_; + } else { + result.resources_ = resourcesBuilder_.build(); + } result.restartPolicy_ = restartPolicy_; onBuilt(); return result; @@ -1877,10 +1841,13 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingRep if (other.getReplicas() != 0) { setReplicas(other.getReplicas()); } - if (!other.getPodTemplateName().isEmpty()) { - podTemplateName_ = other.podTemplateName_; + if (!other.getImage().isEmpty()) { + image_ = other.image_; onChanged(); } + if (other.hasResources()) { + mergeResources(other.getResources()); + } if (other.restartPolicy_ != 0) { setRestartPolicyValue(other.getRestartPolicyValue()); } @@ -1916,7 +1883,7 @@ public Builder mergeFrom( private int replicas_ ; /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1926,7 +1893,7 @@ public int getReplicas() { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1939,7 +1906,7 @@ public Builder setReplicas(int value) { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1951,22 +1918,21 @@ public Builder clearReplicas() { return this; } - private java.lang.Object podTemplateName_ = ""; + private java.lang.Object image_ = ""; /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } else { return (java.lang.String) ref; @@ -1974,20 +1940,19 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -1995,73 +1960,223 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateName( + public Builder setImage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - podTemplateName_ = value; + image_ = value; onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder clearPodTemplateName() { + public Builder clearImage() { - podTemplateName_ = getDefaultInstance().getPodTemplateName(); + image_ = getDefaultInstance().getImage(); onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateNameBytes( + public Builder setImageBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); - podTemplateName_ = value; + image_ = value; onChanged(); return this; } + private flyteidl.core.Tasks.Resources resources_; + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> resourcesBuilder_; + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resourcesBuilder_ != null || resources_ != null; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + if (resourcesBuilder_ == null) { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } else { + return resourcesBuilder_.getMessage(); + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + resources_ = value; + onChanged(); + } else { + resourcesBuilder_.setMessage(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources( + flyteidl.core.Tasks.Resources.Builder builderForValue) { + if (resourcesBuilder_ == null) { + resources_ = builderForValue.build(); + onChanged(); + } else { + resourcesBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder mergeResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (resources_ != null) { + resources_ = + flyteidl.core.Tasks.Resources.newBuilder(resources_).mergeFrom(value).buildPartial(); + } else { + resources_ = value; + } + onChanged(); + } else { + resourcesBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder clearResources() { + if (resourcesBuilder_ == null) { + resources_ = null; + onChanged(); + } else { + resources_ = null; + resourcesBuilder_ = null; + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources.Builder getResourcesBuilder() { + + onChanged(); + return getResourcesFieldBuilder().getBuilder(); + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + if (resourcesBuilder_ != null) { + return resourcesBuilder_.getMessageOrBuilder(); + } else { + return resources_ == null ? + flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> + getResourcesFieldBuilder() { + if (resourcesBuilder_ == null) { + resourcesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder>( + getResources(), + getParentForChildren(), + isClean()); + resources_ = null; + } + return resourcesBuilder_; + } + private int restartPolicy_ = 0; /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicyValue(int value) { restartPolicy_ = value; @@ -2070,10 +2185,10 @@ public Builder setRestartPolicyValue(int value) { } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -2082,10 +2197,10 @@ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy value) { if (value == null) { @@ -2098,10 +2213,10 @@ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy v } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder clearRestartPolicy() { @@ -2182,22 +2297,22 @@ public flyteidl.plugins.kubeflow.Mpi.DistributedMPITrainingReplicaSpec getDefaul static { java.lang.String[] descriptorData = { "\n#flyteidl/plugins/kubeflow/mpi.proto\022\031f" + - "lyteidl.plugins.kubeflow\032&flyteidl/plugi" + - "ns/kubeflow/common.proto\"\310\002\n\032Distributed" + - "MPITrainingTask\022U\n\017worker_replicas\030\001 \001(\013" + - "2<.flyteidl.plugins.kubeflow.Distributed" + - "MPITrainingReplicaSpec\022W\n\021launcher_repli" + - "cas\030\002 \001(\0132<.flyteidl.plugins.kubeflow.Di" + - "stributedMPITrainingReplicaSpec\0228\n\nrun_p" + - "olicy\030\003 \001(\0132$.flyteidl.plugins.kubeflow." + - "RunPolicy\022@\n\016success_policy\030\004 \001(\0162(.flyt" + - "eidl.plugins.kubeflow.SuccessPolicy\"\222\001\n!" + - "DistributedMPITrainingReplicaSpec\022\020\n\010rep" + - "licas\030\001 \001(\005\022\031\n\021pod_template_name\030\002 \001(\t\022@" + - "\n\016restart_policy\030\003 \001(\0162(.flyteidl.plugin" + - "s.kubeflow.RestartPolicyB9Z7github.com/f" + - "lyteorg/flyteidl/gen/pb-go/flyteidl/plug" + - "insb\006proto3" + "lyteidl.plugins.kubeflow\032\031flyteidl/core/" + + "tasks.proto\032&flyteidl/plugins/kubeflow/c" + + "ommon.proto\"\206\002\n\032DistributedMPITrainingTa" + + "sk\022U\n\017worker_replicas\030\001 \001(\0132<.flyteidl.p" + + "lugins.kubeflow.DistributedMPITrainingRe" + + "plicaSpec\022W\n\021launcher_replicas\030\002 \001(\0132<.f" + + "lyteidl.plugins.kubeflow.DistributedMPIT" + + "rainingReplicaSpec\0228\n\nrun_policy\030\003 \001(\0132$" + + ".flyteidl.plugins.kubeflow.RunPolicy\"\263\001\n" + + "!DistributedMPITrainingReplicaSpec\022\020\n\010re" + + "plicas\030\001 \001(\005\022\r\n\005image\030\002 \001(\t\022+\n\tresources" + + "\030\003 \001(\0132\030.flyteidl.core.Resources\022@\n\016rest" + + "art_policy\030\004 \001(\0162(.flyteidl.plugins.kube" + + "flow.RestartPolicyB9Z7github.com/flyteor" + + "g/flyteidl/gen/pb-go/flyteidl/pluginsb\006p" + + "roto3" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -2210,6 +2325,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + flyteidl.core.Tasks.getDescriptor(), flyteidl.plugins.kubeflow.Common.getDescriptor(), }, assigner); internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingTask_descriptor = @@ -2217,13 +2333,14 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingTask_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingTask_descriptor, - new java.lang.String[] { "WorkerReplicas", "LauncherReplicas", "RunPolicy", "SuccessPolicy", }); + new java.lang.String[] { "WorkerReplicas", "LauncherReplicas", "RunPolicy", }); internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingReplicaSpec_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingReplicaSpec_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedMPITrainingReplicaSpec_descriptor, - new java.lang.String[] { "Replicas", "PodTemplateName", "RestartPolicy", }); + new java.lang.String[] { "Replicas", "Image", "Resources", "RestartPolicy", }); + flyteidl.core.Tasks.getDescriptor(); flyteidl.plugins.kubeflow.Common.getDescriptor(); } diff --git a/gen/pb-java/flyteidl/plugins/kubeflow/Pytorch.java b/gen/pb-java/flyteidl/plugins/kubeflow/Pytorch.java index 389ef9b3d..b7d295f4d 100644 --- a/gen/pb-java/flyteidl/plugins/kubeflow/Pytorch.java +++ b/gen/pb-java/flyteidl/plugins/kubeflow/Pytorch.java @@ -98,23 +98,6 @@ public interface DistributedPyTorchTrainingTaskOrBuilder extends * .flyteidl.plugins.kubeflow.RunPolicy run_policy = 3; */ flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder(); - - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - int getSuccessPolicyValue(); - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy(); } /** *
@@ -133,7 +116,6 @@ private DistributedPyTorchTrainingTask(com.google.protobuf.GeneratedMessageV3.Bu
       super(builder);
     }
     private DistributedPyTorchTrainingTask() {
-      successPolicy_ = 0;
     }
 
     @java.lang.Override
@@ -199,12 +181,6 @@ private DistributedPyTorchTrainingTask(
 
               break;
             }
-            case 32: {
-              int rawValue = input.readEnum();
-
-              successPolicy_ = rawValue;
-              break;
-            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
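[Editor note] The later hunks in this Pytorch.java section mirror the MPI change and also rework DistributedPyTorchTrainingReplicaSpec: pod_template_name (field 2) becomes image, a flyteidl.core.Resources message is added at field 3, and restart_policy moves from field 3 to field 4. A hedged sketch of the new replica spec builder (the image string is a made-up placeholder, and a default Resources is used for brevity; real callers would populate requests/limits from flyteidl.core tasks.proto):

// Sketch only: placeholder values, not taken from the patch.
public final class PyTorchReplicaSpecExample {
  public static void main(String[] args) {
    flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec spec =
        flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec.newBuilder()
            .setReplicas(4)
            .setImage("ghcr.io/example/trainer:latest")                        // string image = 2
            .setResources(flyteidl.core.Tasks.Resources.getDefaultInstance())  // resources = 3
            .setRestartPolicy(
                flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS) // now field 4
            .build();
    System.out.println(spec);
  }
}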
@@ -342,31 +318,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder
       return getRunPolicy();
     }
 
-    public static final int SUCCESS_POLICY_FIELD_NUMBER = 4;
-    private int successPolicy_;
-    /**
-     * 
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -390,9 +341,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (runPolicy_ != null) { output.writeMessage(3, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - output.writeEnum(4, successPolicy_); - } unknownFields.writeTo(output); } @@ -414,10 +362,6 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, successPolicy_); - } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -448,7 +392,6 @@ public boolean equals(final java.lang.Object obj) { if (!getRunPolicy() .equals(other.getRunPolicy())) return false; } - if (successPolicy_ != other.successPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -472,8 +415,6 @@ public int hashCode() { hash = (37 * hash) + RUN_POLICY_FIELD_NUMBER; hash = (53 * hash) + getRunPolicy().hashCode(); } - hash = (37 * hash) + SUCCESS_POLICY_FIELD_NUMBER; - hash = (53 * hash) + successPolicy_; hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -629,8 +570,6 @@ public Builder clear() { runPolicy_ = null; runPolicyBuilder_ = null; } - successPolicy_ = 0; - return this; } @@ -672,7 +611,6 @@ public flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingTask buildPar } else { result.runPolicy_ = runPolicyBuilder_.build(); } - result.successPolicy_ = successPolicy_; onBuilt(); return result; } @@ -730,9 +668,6 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTra if (other.hasRunPolicy()) { mergeRunPolicy(other.getRunPolicy()); } - if (other.successPolicy_ != 0) { - setSuccessPolicyValue(other.getSuccessPolicyValue()); - } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1238,71 +1173,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder } return runPolicyBuilder_; } - - private int successPolicy_ = 0; - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder setSuccessPolicyValue(int value) { - successPolicy_ = value; - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder setSuccessPolicy(flyteidl.plugins.kubeflow.Common.SuccessPolicy value) { - if (value == null) { - throw new NullPointerException(); - } - - successPolicy_ = value.getNumber(); - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 4; - */ - public Builder clearSuccessPolicy() { - - successPolicy_ = 0; - onChanged(); - return this; - } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -1362,7 +1232,7 @@ public interface DistributedPyTorchTrainingReplicaSpecOrBuilder extends /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1371,38 +1241,61 @@ public interface DistributedPyTorchTrainingReplicaSpecOrBuilder extends /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - java.lang.String getPodTemplateName(); + java.lang.String getImage(); /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ com.google.protobuf.ByteString - getPodTemplateNameBytes(); + getImageBytes(); /** *
-     * Restart policy for the worker
+     * Resources required for the replica group
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.core.Resources resources = 3; + */ + boolean hasResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.Resources getResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder(); + + /** + *
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
+     * 
+ * + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ int getRestartPolicyValue(); /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy(); } @@ -1419,7 +1312,7 @@ private DistributedPyTorchTrainingReplicaSpec(com.google.protobuf.GeneratedMessa super(builder); } private DistributedPyTorchTrainingReplicaSpec() { - podTemplateName_ = ""; + image_ = ""; restartPolicy_ = 0; } @@ -1455,10 +1348,23 @@ private DistributedPyTorchTrainingReplicaSpec( case 18: { java.lang.String s = input.readStringRequireUtf8(); - podTemplateName_ = s; + image_ = s; break; } - case 24: { + case 26: { + flyteidl.core.Tasks.Resources.Builder subBuilder = null; + if (resources_ != null) { + subBuilder = resources_.toBuilder(); + } + resources_ = input.readMessage(flyteidl.core.Tasks.Resources.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(resources_); + resources_ = subBuilder.buildPartial(); + } + + break; + } + case 32: { int rawValue = input.readEnum(); restartPolicy_ = rawValue; @@ -1500,7 +1406,7 @@ private DistributedPyTorchTrainingReplicaSpec( private int replicas_; /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1509,68 +1415,99 @@ public int getReplicas() { return replicas_; } - public static final int POD_TEMPLATE_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object podTemplateName_; + public static final int IMAGE_FIELD_NUMBER = 2; + private volatile java.lang.Object image_; /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } } /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int RESTART_POLICY_FIELD_NUMBER = 3; + public static final int RESOURCES_FIELD_NUMBER = 3; + private flyteidl.core.Tasks.Resources resources_; + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resources_ != null; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + return getResources(); + } + + public static final int RESTART_POLICY_FIELD_NUMBER = 4; private int restartPolicy_; /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-     * Restart policy for the worker
+     * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -1595,11 +1532,14 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (replicas_ != 0) { output.writeInt32(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, image_); + } + if (resources_ != null) { + output.writeMessage(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { - output.writeEnum(3, restartPolicy_); + output.writeEnum(4, restartPolicy_); } unknownFields.writeTo(output); } @@ -1614,12 +1554,16 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, image_); + } + if (resources_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, restartPolicy_); + .computeEnumSize(4, restartPolicy_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -1638,8 +1582,13 @@ public boolean equals(final java.lang.Object obj) { if (getReplicas() != other.getReplicas()) return false; - if (!getPodTemplateName() - .equals(other.getPodTemplateName())) return false; + if (!getImage() + .equals(other.getImage())) return false; + if (hasResources() != other.hasResources()) return false; + if (hasResources()) { + if (!getResources() + .equals(other.getResources())) return false; + } if (restartPolicy_ != other.restartPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; @@ -1654,8 +1603,12 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + REPLICAS_FIELD_NUMBER; hash = (53 * hash) + getReplicas(); - hash = (37 * hash) + POD_TEMPLATE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getPodTemplateName().hashCode(); + hash = (37 * hash) + IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getImage().hashCode(); + if (hasResources()) { + hash = (37 * hash) + RESOURCES_FIELD_NUMBER; + hash = (53 * hash) + getResources().hashCode(); + } hash = (37 * hash) + RESTART_POLICY_FIELD_NUMBER; hash = (53 * hash) + restartPolicy_; hash = (29 * hash) + unknownFields.hashCode(); @@ -1793,8 +1746,14 @@ public Builder clear() { super.clear(); replicas_ = 0; - podTemplateName_ = ""; + image_ = ""; + if (resourcesBuilder_ == null) { + resources_ = null; + } else { + resources_ = null; + resourcesBuilder_ = null; + } restartPolicy_ = 0; return this; @@ -1824,7 +1783,12 @@ public flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec b public flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec buildPartial() { flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec result = new 
flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec(this); result.replicas_ = replicas_; - result.podTemplateName_ = podTemplateName_; + result.image_ = image_; + if (resourcesBuilder_ == null) { + result.resources_ = resources_; + } else { + result.resources_ = resourcesBuilder_.build(); + } result.restartPolicy_ = restartPolicy_; onBuilt(); return result; @@ -1877,10 +1841,13 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTra if (other.getReplicas() != 0) { setReplicas(other.getReplicas()); } - if (!other.getPodTemplateName().isEmpty()) { - podTemplateName_ = other.podTemplateName_; + if (!other.getImage().isEmpty()) { + image_ = other.image_; onChanged(); } + if (other.hasResources()) { + mergeResources(other.getResources()); + } if (other.restartPolicy_ != 0) { setRestartPolicyValue(other.getRestartPolicyValue()); } @@ -1916,7 +1883,7 @@ public Builder mergeFrom( private int replicas_ ; /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1926,7 +1893,7 @@ public int getReplicas() { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1939,7 +1906,7 @@ public Builder setReplicas(int value) { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -1951,22 +1918,21 @@ public Builder clearReplicas() { return this; } - private java.lang.Object podTemplateName_ = ""; + private java.lang.Object image_ = ""; /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } else { return (java.lang.String) ref; @@ -1974,20 +1940,19 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -1995,73 +1960,223 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateName( + public Builder setImage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - podTemplateName_ = value; + image_ = value; onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder clearPodTemplateName() { + public Builder clearImage() { - podTemplateName_ = getDefaultInstance().getPodTemplateName(); + image_ = getDefaultInstance().getImage(); onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateNameBytes( + public Builder setImageBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); - podTemplateName_ = value; + image_ = value; onChanged(); return this; } + private flyteidl.core.Tasks.Resources resources_; + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> resourcesBuilder_; + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resourcesBuilder_ != null || resources_ != null; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + if (resourcesBuilder_ == null) { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } else { + return resourcesBuilder_.getMessage(); + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + resources_ = value; + onChanged(); + } else { + resourcesBuilder_.setMessage(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources( + flyteidl.core.Tasks.Resources.Builder builderForValue) { + if (resourcesBuilder_ == null) { + resources_ = builderForValue.build(); + onChanged(); + } else { + resourcesBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder mergeResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (resources_ != null) { + resources_ = + flyteidl.core.Tasks.Resources.newBuilder(resources_).mergeFrom(value).buildPartial(); + } else { + resources_ = value; + } + onChanged(); + } else { + resourcesBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder clearResources() { + if (resourcesBuilder_ == null) { + resources_ = null; + onChanged(); + } else { + resources_ = null; + resourcesBuilder_ = null; + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources.Builder getResourcesBuilder() { + + onChanged(); + return getResourcesFieldBuilder().getBuilder(); + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + if (resourcesBuilder_ != null) { + return resourcesBuilder_.getMessageOrBuilder(); + } else { + return resources_ == null ? + flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> + getResourcesFieldBuilder() { + if (resourcesBuilder_ == null) { + resourcesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder>( + getResources(), + getParentForChildren(), + isClean()); + resources_ = null; + } + return resourcesBuilder_; + } + private int restartPolicy_ = 0; /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicyValue(int value) { restartPolicy_ = value; @@ -2070,10 +2185,10 @@ public Builder setRestartPolicyValue(int value) { } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -2082,10 +2197,10 @@ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy value) { if (value == null) { @@ -2098,10 +2213,10 @@ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy v } /** *
-       * Restart policy for the worker
+       * RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows:
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder clearRestartPolicy() { @@ -2182,22 +2297,22 @@ public flyteidl.plugins.kubeflow.Pytorch.DistributedPyTorchTrainingReplicaSpec g static { java.lang.String[] descriptorData = { "\n\'flyteidl/plugins/kubeflow/pytorch.prot" + - "o\022\031flyteidl.plugins.kubeflow\032&flyteidl/p" + - "lugins/kubeflow/common.proto\"\322\002\n\036Distrib" + - "utedPyTorchTrainingTask\022Y\n\017worker_replic" + - "as\030\001 \001(\0132@.flyteidl.plugins.kubeflow.Dis" + - "tributedPyTorchTrainingReplicaSpec\022Y\n\017ma" + - "ster_replicas\030\002 \001(\0132@.flyteidl.plugins.k" + - "ubeflow.DistributedPyTorchTrainingReplic" + - "aSpec\0228\n\nrun_policy\030\003 \001(\0132$.flyteidl.plu" + - "gins.kubeflow.RunPolicy\022@\n\016success_polic" + - "y\030\004 \001(\0162(.flyteidl.plugins.kubeflow.Succ" + - "essPolicy\"\226\001\n%DistributedPyTorchTraining" + - "ReplicaSpec\022\020\n\010replicas\030\001 \001(\005\022\031\n\021pod_tem" + - "plate_name\030\002 \001(\t\022@\n\016restart_policy\030\003 \001(\016" + - "2(.flyteidl.plugins.kubeflow.RestartPoli" + - "cyB9Z7github.com/flyteorg/flyteidl/gen/p" + - "b-go/flyteidl/pluginsb\006proto3" + "o\022\031flyteidl.plugins.kubeflow\032\031flyteidl/c" + + "ore/tasks.proto\032&flyteidl/plugins/kubefl" + + "ow/common.proto\"\220\002\n\036DistributedPyTorchTr" + + "ainingTask\022Y\n\017worker_replicas\030\001 \001(\0132@.fl" + + "yteidl.plugins.kubeflow.DistributedPyTor" + + "chTrainingReplicaSpec\022Y\n\017master_replicas" + + "\030\002 \001(\0132@.flyteidl.plugins.kubeflow.Distr" + + "ibutedPyTorchTrainingReplicaSpec\0228\n\nrun_" + + "policy\030\003 \001(\0132$.flyteidl.plugins.kubeflow" + + ".RunPolicy\"\267\001\n%DistributedPyTorchTrainin" + + "gReplicaSpec\022\020\n\010replicas\030\001 \001(\005\022\r\n\005image\030" + + "\002 \001(\t\022+\n\tresources\030\003 \001(\0132\030.flyteidl.core" + + ".Resources\022@\n\016restart_policy\030\004 \001(\0162(.fly" + + "teidl.plugins.kubeflow.RestartPolicyB9Z7" + + "github.com/flyteorg/flyteidl/gen/pb-go/f" + + "lyteidl/pluginsb\006proto3" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -2210,6 +2325,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + flyteidl.core.Tasks.getDescriptor(), flyteidl.plugins.kubeflow.Common.getDescriptor(), }, assigner); internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingTask_descriptor = @@ -2217,13 +2333,14 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingTask_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingTask_descriptor, - new java.lang.String[] { "WorkerReplicas", "MasterReplicas", "RunPolicy", "SuccessPolicy", }); + new java.lang.String[] { "WorkerReplicas", "MasterReplicas", "RunPolicy", }); internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingReplicaSpec_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingReplicaSpec_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedPyTorchTrainingReplicaSpec_descriptor, - new java.lang.String[] { "Replicas", "PodTemplateName", "RestartPolicy", }); + new java.lang.String[] { "Replicas", "Image", "Resources", "RestartPolicy", }); + flyteidl.core.Tasks.getDescriptor(); flyteidl.plugins.kubeflow.Common.getDescriptor(); } diff --git a/gen/pb-java/flyteidl/plugins/kubeflow/Tensorflow.java b/gen/pb-java/flyteidl/plugins/kubeflow/Tensorflow.java index 0adb94445..0ac70d48e 100644 --- a/gen/pb-java/flyteidl/plugins/kubeflow/Tensorflow.java +++ b/gen/pb-java/flyteidl/plugins/kubeflow/Tensorflow.java @@ -123,23 +123,6 @@ public interface DistributedTensorflowTrainingTaskOrBuilder extends * .flyteidl.plugins.kubeflow.RunPolicy run_policy = 4; */ flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder(); - - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - int getSuccessPolicyValue(); - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy(); } /** *
@@ -158,7 +141,6 @@ private DistributedTensorflowTrainingTask(com.google.protobuf.GeneratedMessageV3
       super(builder);
     }
     private DistributedTensorflowTrainingTask() {
-      successPolicy_ = 0;
     }
 
     @java.lang.Override
@@ -237,12 +219,6 @@ private DistributedTensorflowTrainingTask(
 
               break;
             }
-            case 40: {
-              int rawValue = input.readEnum();
-
-              successPolicy_ = rawValue;
-              break;
-            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
@@ -413,31 +389,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder
       return getRunPolicy();
     }
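The hunks that follow remove the success_policy accessors (field 5) from DistributedTensorflowTrainingTask. As a backward-compatibility sanity check, here is a minimal sketch (not part of the patch) using the regenerated Python stubs: a payload written by the old schema still parses, because the dropped field simply falls through to the unknown-field path kept in the default: branch above, assuming protobuf >= 3.5 unknown-field preservation.

from flyteidl.plugins.kubeflow import tensorflow_pb2

# Bytes an old client could have produced: tag 40 = field 5 (success_policy),
# varint value 1 (the former SUCCESS_POLICY_ALL_WORKERS).
old_payload = b"\x28\x01"

task = tensorflow_pb2.DistributedTensorflowTrainingTask.FromString(old_payload)
# Parsing succeeds; the regenerated message has no success_policy field, so the
# value is carried as an unknown field and re-emitted by task.SerializeToString()
# rather than being rejected.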
 
-    public static final int SUCCESS_POLICY_FIELD_NUMBER = 5;
-    private int successPolicy_;
-    /**
-     * 
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-     * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-     * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -464,9 +415,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (runPolicy_ != null) { output.writeMessage(4, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - output.writeEnum(5, successPolicy_); - } unknownFields.writeTo(output); } @@ -492,10 +440,6 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, getRunPolicy()); } - if (successPolicy_ != flyteidl.plugins.kubeflow.Common.SuccessPolicy.SUCCESS_POLICY_DEFAULT.getNumber()) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(5, successPolicy_); - } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -531,7 +475,6 @@ public boolean equals(final java.lang.Object obj) { if (!getRunPolicy() .equals(other.getRunPolicy())) return false; } - if (successPolicy_ != other.successPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -559,8 +502,6 @@ public int hashCode() { hash = (37 * hash) + RUN_POLICY_FIELD_NUMBER; hash = (53 * hash) + getRunPolicy().hashCode(); } - hash = (37 * hash) + SUCCESS_POLICY_FIELD_NUMBER; - hash = (53 * hash) + successPolicy_; hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -722,8 +663,6 @@ public Builder clear() { runPolicy_ = null; runPolicyBuilder_ = null; } - successPolicy_ = 0; - return this; } @@ -770,7 +709,6 @@ public flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingTask bu } else { result.runPolicy_ = runPolicyBuilder_.build(); } - result.successPolicy_ = successPolicy_; onBuilt(); return result; } @@ -831,9 +769,6 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorf if (other.hasRunPolicy()) { mergeRunPolicy(other.getRunPolicy()); } - if (other.successPolicy_ != 0) { - setSuccessPolicyValue(other.getSuccessPolicyValue()); - } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1492,71 +1427,6 @@ public flyteidl.plugins.kubeflow.Common.RunPolicyOrBuilder getRunPolicyOrBuilder } return runPolicyBuilder_; } - - private int successPolicy_ = 0; - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public int getSuccessPolicyValue() { - return successPolicy_; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public Builder setSuccessPolicyValue(int value) { - successPolicy_ = value; - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public flyteidl.plugins.kubeflow.Common.SuccessPolicy getSuccessPolicy() { - @SuppressWarnings("deprecation") - flyteidl.plugins.kubeflow.Common.SuccessPolicy result = flyteidl.plugins.kubeflow.Common.SuccessPolicy.valueOf(successPolicy_); - return result == null ? flyteidl.plugins.kubeflow.Common.SuccessPolicy.UNRECOGNIZED : result; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public Builder setSuccessPolicy(flyteidl.plugins.kubeflow.Common.SuccessPolicy value) { - if (value == null) { - throw new NullPointerException(); - } - - successPolicy_ = value.getNumber(); - onChanged(); - return this; - } - /** - *
-       * SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None.
-       * 
- * - * .flyteidl.plugins.kubeflow.SuccessPolicy success_policy = 5; - */ - public Builder clearSuccessPolicy() { - - successPolicy_ = 0; - onChanged(); - return this; - } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -1616,7 +1486,7 @@ public interface DistributedTensorflowTrainingReplicaSpecOrBuilder extends /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1625,38 +1495,61 @@ public interface DistributedTensorflowTrainingReplicaSpecOrBuilder extends /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - java.lang.String getPodTemplateName(); + java.lang.String getImage(); /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ com.google.protobuf.ByteString - getPodTemplateNameBytes(); + getImageBytes(); /** *
-     * Restart policy for the worker
+     * Resources required for the replica group
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.core.Resources resources = 3; + */ + boolean hasResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.Resources getResources(); + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder(); + + /** + *
+     * RestartPolicy determines whether pods will be restarted when they exit.
+     * 
+ * + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ int getRestartPolicyValue(); /** *
-     * Restart policy for the worker
+     * RestartPolicy determines whether pods will be restarted when they exit.
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy(); } @@ -1673,7 +1566,7 @@ private DistributedTensorflowTrainingReplicaSpec(com.google.protobuf.GeneratedMe super(builder); } private DistributedTensorflowTrainingReplicaSpec() { - podTemplateName_ = ""; + image_ = ""; restartPolicy_ = 0; } @@ -1709,10 +1602,23 @@ private DistributedTensorflowTrainingReplicaSpec( case 18: { java.lang.String s = input.readStringRequireUtf8(); - podTemplateName_ = s; + image_ = s; break; } - case 24: { + case 26: { + flyteidl.core.Tasks.Resources.Builder subBuilder = null; + if (resources_ != null) { + subBuilder = resources_.toBuilder(); + } + resources_ = input.readMessage(flyteidl.core.Tasks.Resources.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(resources_); + resources_ = subBuilder.buildPartial(); + } + + break; + } + case 32: { int rawValue = input.readEnum(); restartPolicy_ = rawValue; @@ -1754,7 +1660,7 @@ private DistributedTensorflowTrainingReplicaSpec( private int replicas_; /** *
-     * Number of workers
+     * Number of replicas
      * 
* * int32 replicas = 1; @@ -1763,68 +1669,99 @@ public int getReplicas() { return replicas_; } - public static final int POD_TEMPLATE_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object podTemplateName_; + public static final int IMAGE_FIELD_NUMBER = 2; + private volatile java.lang.Object image_; /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } } /** *
-     * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-     * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+     * Image used for the replica group
      * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int RESTART_POLICY_FIELD_NUMBER = 3; + public static final int RESOURCES_FIELD_NUMBER = 3; + private flyteidl.core.Tasks.Resources resources_; + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resources_ != null; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + /** + *
+     * Resources required for the replica group
+     * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + return getResources(); + } + + public static final int RESTART_POLICY_FIELD_NUMBER = 4; private int restartPolicy_; /** *
-     * Restart policy for the worker
+     * RestartPolicy determines whether pods will be restarted when they exit.
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-     * Restart policy for the worker
+     * RestartPolicy determines whether pods will be restarted when they exit.
      * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -1849,11 +1786,14 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (replicas_ != 0) { output.writeInt32(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, image_); + } + if (resources_ != null) { + output.writeMessage(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { - output.writeEnum(3, restartPolicy_); + output.writeEnum(4, restartPolicy_); } unknownFields.writeTo(output); } @@ -1868,12 +1808,16 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, replicas_); } - if (!getPodTemplateNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, podTemplateName_); + if (!getImageBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, image_); + } + if (resources_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getResources()); } if (restartPolicy_ != flyteidl.plugins.kubeflow.Common.RestartPolicy.RESTART_POLICY_ALWAYS.getNumber()) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, restartPolicy_); + .computeEnumSize(4, restartPolicy_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -1892,8 +1836,13 @@ public boolean equals(final java.lang.Object obj) { if (getReplicas() != other.getReplicas()) return false; - if (!getPodTemplateName() - .equals(other.getPodTemplateName())) return false; + if (!getImage() + .equals(other.getImage())) return false; + if (hasResources() != other.hasResources()) return false; + if (hasResources()) { + if (!getResources() + .equals(other.getResources())) return false; + } if (restartPolicy_ != other.restartPolicy_) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; @@ -1908,8 +1857,12 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + REPLICAS_FIELD_NUMBER; hash = (53 * hash) + getReplicas(); - hash = (37 * hash) + POD_TEMPLATE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getPodTemplateName().hashCode(); + hash = (37 * hash) + IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getImage().hashCode(); + if (hasResources()) { + hash = (37 * hash) + RESOURCES_FIELD_NUMBER; + hash = (53 * hash) + getResources().hashCode(); + } hash = (37 * hash) + RESTART_POLICY_FIELD_NUMBER; hash = (53 * hash) + restartPolicy_; hash = (29 * hash) + unknownFields.hashCode(); @@ -2047,8 +2000,14 @@ public Builder clear() { super.clear(); replicas_ = 0; - podTemplateName_ = ""; + image_ = ""; + if (resourcesBuilder_ == null) { + resources_ = null; + } else { + resources_ = null; + resourcesBuilder_ = null; + } restartPolicy_ = 0; return this; @@ -2078,7 +2037,12 @@ public flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingReplica public flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingReplicaSpec buildPartial() { flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingReplicaSpec result = new 
flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingReplicaSpec(this); result.replicas_ = replicas_; - result.podTemplateName_ = podTemplateName_; + result.image_ = image_; + if (resourcesBuilder_ == null) { + result.resources_ = resources_; + } else { + result.resources_ = resourcesBuilder_.build(); + } result.restartPolicy_ = restartPolicy_; onBuilt(); return result; @@ -2131,10 +2095,13 @@ public Builder mergeFrom(flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorf if (other.getReplicas() != 0) { setReplicas(other.getReplicas()); } - if (!other.getPodTemplateName().isEmpty()) { - podTemplateName_ = other.podTemplateName_; + if (!other.getImage().isEmpty()) { + image_ = other.image_; onChanged(); } + if (other.hasResources()) { + mergeResources(other.getResources()); + } if (other.restartPolicy_ != 0) { setRestartPolicyValue(other.getRestartPolicyValue()); } @@ -2170,7 +2137,7 @@ public Builder mergeFrom( private int replicas_ ; /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -2180,7 +2147,7 @@ public int getReplicas() { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -2193,7 +2160,7 @@ public Builder setReplicas(int value) { } /** *
-       * Number of workers
+       * Number of replicas
        * 
* * int32 replicas = 1; @@ -2205,22 +2172,21 @@ public Builder clearReplicas() { return this; } - private java.lang.Object podTemplateName_ = ""; + private java.lang.Object image_ = ""; /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public java.lang.String getPodTemplateName() { - java.lang.Object ref = podTemplateName_; + public java.lang.String getImage() { + java.lang.Object ref = image_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); - podTemplateName_ = s; + image_ = s; return s; } else { return (java.lang.String) ref; @@ -2228,20 +2194,19 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ public com.google.protobuf.ByteString - getPodTemplateNameBytes() { - java.lang.Object ref = podTemplateName_; + getImageBytes() { + java.lang.Object ref = image_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - podTemplateName_ = b; + image_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -2249,73 +2214,223 @@ public java.lang.String getPodTemplateName() { } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateName( + public Builder setImage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - podTemplateName_ = value; + image_ = value; onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder clearPodTemplateName() { + public Builder clearImage() { - podTemplateName_ = getDefaultInstance().getPodTemplateName(); + image_ = getDefaultInstance().getImage(); onChanged(); return this; } /** *
-       * Unique name of a PodTemplate k8s resource to be used as the base configuration.
-       * PodTemplate specified here will be overriden by the pod template specified at the task metedata level.
+       * Image used for the replica group
        * 
* - * string pod_template_name = 2; + * string image = 2; */ - public Builder setPodTemplateNameBytes( + public Builder setImageBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); - podTemplateName_ = value; + image_ = value; onChanged(); return this; } + private flyteidl.core.Tasks.Resources resources_; + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> resourcesBuilder_; + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public boolean hasResources() { + return resourcesBuilder_ != null || resources_ != null; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources getResources() { + if (resourcesBuilder_ == null) { + return resources_ == null ? flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } else { + return resourcesBuilder_.getMessage(); + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + resources_ = value; + onChanged(); + } else { + resourcesBuilder_.setMessage(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder setResources( + flyteidl.core.Tasks.Resources.Builder builderForValue) { + if (resourcesBuilder_ == null) { + resources_ = builderForValue.build(); + onChanged(); + } else { + resourcesBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder mergeResources(flyteidl.core.Tasks.Resources value) { + if (resourcesBuilder_ == null) { + if (resources_ != null) { + resources_ = + flyteidl.core.Tasks.Resources.newBuilder(resources_).mergeFrom(value).buildPartial(); + } else { + resources_ = value; + } + onChanged(); + } else { + resourcesBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public Builder clearResources() { + if (resourcesBuilder_ == null) { + resources_ = null; + onChanged(); + } else { + resources_ = null; + resourcesBuilder_ = null; + } + + return this; + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.Resources.Builder getResourcesBuilder() { + + onChanged(); + return getResourcesFieldBuilder().getBuilder(); + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + public flyteidl.core.Tasks.ResourcesOrBuilder getResourcesOrBuilder() { + if (resourcesBuilder_ != null) { + return resourcesBuilder_.getMessageOrBuilder(); + } else { + return resources_ == null ? + flyteidl.core.Tasks.Resources.getDefaultInstance() : resources_; + } + } + /** + *
+       * Resources required for the replica group
+       * 
+ * + * .flyteidl.core.Resources resources = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder> + getResourcesFieldBuilder() { + if (resourcesBuilder_ == null) { + resourcesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + flyteidl.core.Tasks.Resources, flyteidl.core.Tasks.Resources.Builder, flyteidl.core.Tasks.ResourcesOrBuilder>( + getResources(), + getParentForChildren(), + isClean()); + resources_ = null; + } + return resourcesBuilder_; + } + private int restartPolicy_ = 0; /** *
-       * Restart policy for the worker
+       * RestartPolicy determines whether pods will be restarted when they exit.
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public int getRestartPolicyValue() { return restartPolicy_; } /** *
-       * Restart policy for the worker
+       * RestartPolicy determines whether pods will be restarted when they exit.
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicyValue(int value) { restartPolicy_ = value; @@ -2324,10 +2439,10 @@ public Builder setRestartPolicyValue(int value) { } /** *
-       * Restart policy for the worker
+       * RestartPolicy determines whether pods will be restarted when they exit.
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { @SuppressWarnings("deprecation") @@ -2336,10 +2451,10 @@ public flyteidl.plugins.kubeflow.Common.RestartPolicy getRestartPolicy() { } /** *
-       * Restart policy for the worker
+       * RestartPolicy determines whether pods will be restarted when they exit.
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy value) { if (value == null) { @@ -2352,10 +2467,10 @@ public Builder setRestartPolicy(flyteidl.plugins.kubeflow.Common.RestartPolicy v } /** *
-       * Restart policy for the worker
+       * RestartPolicy determines whether pods will be restarted when they exit.
        * 
* - * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 3; + * .flyteidl.plugins.kubeflow.RestartPolicy restart_policy = 4; */ public Builder clearRestartPolicy() { @@ -2436,25 +2551,25 @@ public flyteidl.plugins.kubeflow.Tensorflow.DistributedTensorflowTrainingReplica static { java.lang.String[] descriptorData = { "\n*flyteidl/plugins/kubeflow/tensorflow.p" + - "roto\022\031flyteidl.plugins.kubeflow\032&flyteid" + - "l/plugins/kubeflow/common.proto\"\264\003\n!Dist" + - "ributedTensorflowTrainingTask\022\\\n\017worker_" + - "replicas\030\001 \001(\0132C.flyteidl.plugins.kubefl" + - "ow.DistributedTensorflowTrainingReplicaS" + - "pec\022X\n\013ps_replicas\030\002 \001(\0132C.flyteidl.plug" + + "roto\022\031flyteidl.plugins.kubeflow\032\031flyteid" + + "l/core/tasks.proto\032&flyteidl/plugins/kub" + + "eflow/common.proto\"\362\002\n!DistributedTensor" + + "flowTrainingTask\022\\\n\017worker_replicas\030\001 \001(" + + "\0132C.flyteidl.plugins.kubeflow.Distribute" + + "dTensorflowTrainingReplicaSpec\022X\n\013ps_rep" + + "licas\030\002 \001(\0132C.flyteidl.plugins.kubeflow." + + "DistributedTensorflowTrainingReplicaSpec" + + "\022[\n\016chief_replicas\030\003 \001(\0132C.flyteidl.plug" + "ins.kubeflow.DistributedTensorflowTraini" + - "ngReplicaSpec\022[\n\016chief_replicas\030\003 \001(\0132C." + - "flyteidl.plugins.kubeflow.DistributedTen" + - "sorflowTrainingReplicaSpec\0228\n\nrun_policy" + - "\030\004 \001(\0132$.flyteidl.plugins.kubeflow.RunPo" + - "licy\022@\n\016success_policy\030\005 \001(\0162(.flyteidl." + - "plugins.kubeflow.SuccessPolicy\"\231\001\n(Distr" + - "ibutedTensorflowTrainingReplicaSpec\022\020\n\010r" + - "eplicas\030\001 \001(\005\022\031\n\021pod_template_name\030\002 \001(\t" + - "\022@\n\016restart_policy\030\003 \001(\0162(.flyteidl.plug" + - "ins.kubeflow.RestartPolicyB9Z7github.com" + - "/flyteorg/flyteidl/gen/pb-go/flyteidl/pl" + - "uginsb\006proto3" + "ngReplicaSpec\0228\n\nrun_policy\030\004 \001(\0132$.flyt" + + "eidl.plugins.kubeflow.RunPolicy\"\272\001\n(Dist" + + "ributedTensorflowTrainingReplicaSpec\022\020\n\010" + + "replicas\030\001 \001(\005\022\r\n\005image\030\002 \001(\t\022+\n\tresourc" + + "es\030\003 \001(\0132\030.flyteidl.core.Resources\022@\n\016re" + + "start_policy\030\004 \001(\0162(.flyteidl.plugins.ku" + + "beflow.RestartPolicyB9Z7github.com/flyte" + + "org/flyteidl/gen/pb-go/flyteidl/pluginsb" + + "\006proto3" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -2467,6 +2582,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + flyteidl.core.Tasks.getDescriptor(), flyteidl.plugins.kubeflow.Common.getDescriptor(), }, assigner); internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingTask_descriptor = @@ -2474,13 +2590,14 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingTask_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingTask_descriptor, - new java.lang.String[] { "WorkerReplicas", "PsReplicas", "ChiefReplicas", "RunPolicy", "SuccessPolicy", }); + new java.lang.String[] { "WorkerReplicas", "PsReplicas", "ChiefReplicas", "RunPolicy", }); internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingReplicaSpec_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingReplicaSpec_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_flyteidl_plugins_kubeflow_DistributedTensorflowTrainingReplicaSpec_descriptor, - new java.lang.String[] { "Replicas", "PodTemplateName", "RestartPolicy", }); + new java.lang.String[] { "Replicas", "Image", "Resources", "RestartPolicy", }); + flyteidl.core.Tasks.getDescriptor(); flyteidl.plugins.kubeflow.Common.getDescriptor(); } diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.py b/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.py index d10e4ecb1..c64c3c44e 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.py +++ b/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.py @@ -13,7 +13,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&flyteidl/plugins/kubeflow/common.proto\x12\x19\x66lyteidl.plugins.kubeflow\"\xf8\x01\n\tRunPolicy\x12S\n\x10\x63lean_pod_policy\x18\x01 \x01(\x0e\x32).flyteidl.plugins.kubeflow.CleanPodPolicyR\x0e\x63leanPodPolicy\x12;\n\x1attl_seconds_after_finished\x18\x02 \x01(\x05R\x17ttlSecondsAfterFinished\x12\x34\n\x15\x61\x63tiveDeadlineSeconds\x18\x03 \x01(\x05R\x15\x61\x63tiveDeadlineSeconds\x12#\n\rbackoff_limit\x18\x04 \x01(\x05R\x0c\x62\x61\x63koffLimit*K\n\rSuccessPolicy\x12\x1a\n\x16SUCCESS_POLICY_DEFAULT\x10\x00\x12\x1e\n\x1aSUCCESS_POLICY_ALL_WORKERS\x10\x01*\x7f\n\x0e\x43leanPodPolicy\x12\x1d\n\x19\x43LEANPOD_POLICY_UNDEFINED\x10\x00\x12\x17\n\x13\x43LEANPOD_POLICY_ALL\x10\x01\x12\x1b\n\x17\x43LEANPOD_POLICY_RUNNING\x10\x02\x12\x18\n\x14\x43LEANPOD_POLICY_NONE\x10\x03*c\n\rRestartPolicy\x12\x19\n\x15RESTART_POLICY_ALWAYS\x10\x00\x12\x1d\n\x19RESTART_POLICY_ON_FAILURE\x10\x01\x12\x18\n\x14RESTART_POLICY_NEVER\x10\x02\x42\xeb\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0b\x43ommonProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&flyteidl/plugins/kubeflow/common.proto\x12\x19\x66lyteidl.plugins.kubeflow\"\xf8\x01\n\tRunPolicy\x12S\n\x10\x63lean_pod_policy\x18\x01 
\x01(\x0e\x32).flyteidl.plugins.kubeflow.CleanPodPolicyR\x0e\x63leanPodPolicy\x12;\n\x1attl_seconds_after_finished\x18\x02 \x01(\x05R\x17ttlSecondsAfterFinished\x12\x34\n\x15\x61\x63tiveDeadlineSeconds\x18\x03 \x01(\x05R\x15\x61\x63tiveDeadlineSeconds\x12#\n\rbackoff_limit\x18\x04 \x01(\x05R\x0c\x62\x61\x63koffLimit*`\n\x0e\x43leanPodPolicy\x12\x17\n\x13\x43LEANPOD_POLICY_ALL\x10\x00\x12\x1b\n\x17\x43LEANPOD_POLICY_RUNNING\x10\x01\x12\x18\n\x14\x43LEANPOD_POLICY_NONE\x10\x02*c\n\rRestartPolicy\x12\x19\n\x15RESTART_POLICY_ALWAYS\x10\x00\x12\x1d\n\x19RESTART_POLICY_ON_FAILURE\x10\x01\x12\x18\n\x14RESTART_POLICY_NEVER\x10\x02\x42\xeb\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0b\x43ommonProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.kubeflow.common_pb2', globals()) @@ -21,12 +21,10 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\035com.flyteidl.plugins.kubeflowB\013CommonProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPK\252\002\031Flyteidl.Plugins.Kubeflow\312\002\031Flyteidl\\Plugins\\Kubeflow\342\002%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\352\002\033Flyteidl::Plugins::Kubeflow' - _SUCCESSPOLICY._serialized_start=320 - _SUCCESSPOLICY._serialized_end=395 - _CLEANPODPOLICY._serialized_start=397 - _CLEANPODPOLICY._serialized_end=524 - _RESTARTPOLICY._serialized_start=526 - _RESTARTPOLICY._serialized_end=625 + _CLEANPODPOLICY._serialized_start=320 + _CLEANPODPOLICY._serialized_end=416 + _RESTARTPOLICY._serialized_start=418 + _RESTARTPOLICY._serialized_end=517 _RUNPOLICY._serialized_start=70 _RUNPOLICY._serialized_end=318 # @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.pyi b/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.pyi index 6a2e3fb15..343ee0bfb 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.pyi +++ b/gen/pb_python/flyteidl/plugins/kubeflow/common_pb2.pyi @@ -6,13 +6,10 @@ from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union CLEANPOD_POLICY_ALL: CleanPodPolicy CLEANPOD_POLICY_NONE: CleanPodPolicy CLEANPOD_POLICY_RUNNING: CleanPodPolicy -CLEANPOD_POLICY_UNDEFINED: CleanPodPolicy DESCRIPTOR: _descriptor.FileDescriptor RESTART_POLICY_ALWAYS: RestartPolicy RESTART_POLICY_NEVER: RestartPolicy RESTART_POLICY_ON_FAILURE: RestartPolicy -SUCCESS_POLICY_ALL_WORKERS: SuccessPolicy -SUCCESS_POLICY_DEFAULT: SuccessPolicy class RunPolicy(_message.Message): __slots__ = ["activeDeadlineSeconds", "backoff_limit", "clean_pod_policy", "ttl_seconds_after_finished"] @@ -26,9 +23,6 @@ class RunPolicy(_message.Message): ttl_seconds_after_finished: int def __init__(self, clean_pod_policy: _Optional[_Union[CleanPodPolicy, str]] = ..., ttl_seconds_after_finished: _Optional[int] = ..., activeDeadlineSeconds: _Optional[int] = ..., backoff_limit: _Optional[int] = ...) -> None: ... 
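Since the regenerated common stubs above drop SuccessPolicy and CLEANPOD_POLICY_UNDEFINED (renumbering the remaining CleanPodPolicy values from zero), here is a minimal sketch of constructing a RunPolicy against them; the field names come from the .pyi above and the concrete numbers are placeholders.

from flyteidl.plugins.kubeflow import common_pb2

run_policy = common_pb2.RunPolicy(
    clean_pod_policy=common_pb2.CLEANPOD_POLICY_RUNNING,  # now enum value 1 after renumbering
    ttl_seconds_after_finished=3600,   # placeholder values
    activeDeadlineSeconds=7200,
    backoff_limit=3,
)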
-class SuccessPolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - class CleanPodPolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.py b/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.py index 7d0c9139d..e95a9926f 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.py +++ b/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.py @@ -11,10 +11,11 @@ _sym_db = _symbol_database.Default() +from flyteidl.core import tasks_pb2 as flyteidl_dot_core_dot_tasks__pb2 from flyteidl.plugins.kubeflow import common_pb2 as flyteidl_dot_plugins_dot_kubeflow_dot_common__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#flyteidl/plugins/kubeflow/mpi.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a&flyteidl/plugins/kubeflow/common.proto\"\x84\x03\n\x1a\x44istributedMPITrainingTask\x12\x65\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32<.flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x0eworkerReplicas\x12i\n\x11launcher_replicas\x18\x02 \x01(\x0b\x32<.flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x10launcherReplicas\x12\x43\n\nrun_policy\x18\x03 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\x12O\n\x0esuccess_policy\x18\x04 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.SuccessPolicyR\rsuccessPolicy\"\xbc\x01\n!DistributedMPITrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12*\n\x11pod_template_name\x18\x02 \x01(\tR\x0fpodTemplateName\x12O\n\x0erestart_policy\x18\x03 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xe8\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x08MpiProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#flyteidl/plugins/kubeflow/mpi.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a\x19\x66lyteidl/core/tasks.proto\x1a&flyteidl/plugins/kubeflow/common.proto\"\xb3\x02\n\x1a\x44istributedMPITrainingTask\x12\x65\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32<.flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x0eworkerReplicas\x12i\n\x11launcher_replicas\x18\x02 \x01(\x0b\x32<.flyteidl.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x10launcherReplicas\x12\x43\n\nrun_policy\x18\x03 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\"\xde\x01\n!DistributedMPITrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12\x14\n\x05image\x18\x02 \x01(\tR\x05image\x12\x36\n\tresources\x18\x03 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x0erestart_policy\x18\x04 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xe8\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x08MpiProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.kubeflow.mpi_pb2', globals()) @@ -22,8 +23,8 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = 
b'\n\035com.flyteidl.plugins.kubeflowB\010MpiProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPK\252\002\031Flyteidl.Plugins.Kubeflow\312\002\031Flyteidl\\Plugins\\Kubeflow\342\002%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\352\002\033Flyteidl::Plugins::Kubeflow' - _DISTRIBUTEDMPITRAININGTASK._serialized_start=107 - _DISTRIBUTEDMPITRAININGTASK._serialized_end=495 - _DISTRIBUTEDMPITRAININGREPLICASPEC._serialized_start=498 - _DISTRIBUTEDMPITRAININGREPLICASPEC._serialized_end=686 + _DISTRIBUTEDMPITRAININGTASK._serialized_start=134 + _DISTRIBUTEDMPITRAININGTASK._serialized_end=441 + _DISTRIBUTEDMPITRAININGREPLICASPEC._serialized_start=444 + _DISTRIBUTEDMPITRAININGREPLICASPEC._serialized_end=666 # @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.pyi b/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.pyi index 51b79b771..ffe479ce5 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.pyi +++ b/gen/pb_python/flyteidl/plugins/kubeflow/mpi_pb2.pyi @@ -1,3 +1,4 @@ +from flyteidl.core import tasks_pb2 as _tasks_pb2 from flyteidl.plugins.kubeflow import common_pb2 as _common_pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -6,23 +7,23 @@ from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Opti DESCRIPTOR: _descriptor.FileDescriptor class DistributedMPITrainingReplicaSpec(_message.Message): - __slots__ = ["pod_template_name", "replicas", "restart_policy"] - POD_TEMPLATE_NAME_FIELD_NUMBER: _ClassVar[int] + __slots__ = ["image", "replicas", "resources", "restart_policy"] + IMAGE_FIELD_NUMBER: _ClassVar[int] REPLICAS_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] - pod_template_name: str + image: str replicas: int + resources: _tasks_pb2.Resources restart_policy: _common_pb2.RestartPolicy - def __init__(self, replicas: _Optional[int] = ..., pod_template_name: _Optional[str] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... class DistributedMPITrainingTask(_message.Message): - __slots__ = ["launcher_replicas", "run_policy", "success_policy", "worker_replicas"] + __slots__ = ["launcher_replicas", "run_policy", "worker_replicas"] LAUNCHER_REPLICAS_FIELD_NUMBER: _ClassVar[int] RUN_POLICY_FIELD_NUMBER: _ClassVar[int] - SUCCESS_POLICY_FIELD_NUMBER: _ClassVar[int] WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] launcher_replicas: DistributedMPITrainingReplicaSpec run_policy: _common_pb2.RunPolicy - success_policy: _common_pb2.SuccessPolicy worker_replicas: DistributedMPITrainingReplicaSpec - def __init__(self, worker_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., launcher_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ..., success_policy: _Optional[_Union[_common_pb2.SuccessPolicy, str]] = ...) -> None: ... + def __init__(self, worker_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., launcher_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ...) 
-> None: ... diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.py b/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.py index 41501e667..85874dcb2 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.py +++ b/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.py @@ -11,10 +11,11 @@ _sym_db = _symbol_database.Default() +from flyteidl.core import tasks_pb2 as flyteidl_dot_core_dot_tasks__pb2 from flyteidl.plugins.kubeflow import common_pb2 as flyteidl_dot_plugins_dot_kubeflow_dot_common__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'flyteidl/plugins/kubeflow/pytorch.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a&flyteidl/plugins/kubeflow/common.proto\"\x8c\x03\n\x1e\x44istributedPyTorchTrainingTask\x12i\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32@.flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0eworkerReplicas\x12i\n\x0fmaster_replicas\x18\x02 \x01(\x0b\x32@.flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0emasterReplicas\x12\x43\n\nrun_policy\x18\x03 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\x12O\n\x0esuccess_policy\x18\x04 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.SuccessPolicyR\rsuccessPolicy\"\xc0\x01\n%DistributedPyTorchTrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12*\n\x11pod_template_name\x18\x02 \x01(\tR\x0fpodTemplateName\x12O\n\x0erestart_policy\x18\x03 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xec\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0cPytorchProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'flyteidl/plugins/kubeflow/pytorch.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a\x19\x66lyteidl/core/tasks.proto\x1a&flyteidl/plugins/kubeflow/common.proto\"\xbb\x02\n\x1e\x44istributedPyTorchTrainingTask\x12i\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32@.flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0eworkerReplicas\x12i\n\x0fmaster_replicas\x18\x02 \x01(\x0b\x32@.flyteidl.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0emasterReplicas\x12\x43\n\nrun_policy\x18\x03 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\"\xe2\x01\n%DistributedPyTorchTrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12\x14\n\x05image\x18\x02 \x01(\tR\x05image\x12\x36\n\tresources\x18\x03 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x0erestart_policy\x18\x04 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xec\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0cPytorchProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.kubeflow.pytorch_pb2', globals()) @@ -22,8 +23,8 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = 
b'\n\035com.flyteidl.plugins.kubeflowB\014PytorchProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPK\252\002\031Flyteidl.Plugins.Kubeflow\312\002\031Flyteidl\\Plugins\\Kubeflow\342\002%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\352\002\033Flyteidl::Plugins::Kubeflow' - _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_start=111 - _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_end=507 - _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC._serialized_start=510 - _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC._serialized_end=702 + _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_start=138 + _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_end=453 + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC._serialized_start=456 + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC._serialized_end=682 # @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.pyi b/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.pyi index c02032262..d350a638d 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.pyi +++ b/gen/pb_python/flyteidl/plugins/kubeflow/pytorch_pb2.pyi @@ -1,3 +1,4 @@ +from flyteidl.core import tasks_pb2 as _tasks_pb2 from flyteidl.plugins.kubeflow import common_pb2 as _common_pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -6,23 +7,23 @@ from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Opti DESCRIPTOR: _descriptor.FileDescriptor class DistributedPyTorchTrainingReplicaSpec(_message.Message): - __slots__ = ["pod_template_name", "replicas", "restart_policy"] - POD_TEMPLATE_NAME_FIELD_NUMBER: _ClassVar[int] + __slots__ = ["image", "replicas", "resources", "restart_policy"] + IMAGE_FIELD_NUMBER: _ClassVar[int] REPLICAS_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] - pod_template_name: str + image: str replicas: int + resources: _tasks_pb2.Resources restart_policy: _common_pb2.RestartPolicy - def __init__(self, replicas: _Optional[int] = ..., pod_template_name: _Optional[str] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... class DistributedPyTorchTrainingTask(_message.Message): - __slots__ = ["master_replicas", "run_policy", "success_policy", "worker_replicas"] + __slots__ = ["master_replicas", "run_policy", "worker_replicas"] MASTER_REPLICAS_FIELD_NUMBER: _ClassVar[int] RUN_POLICY_FIELD_NUMBER: _ClassVar[int] - SUCCESS_POLICY_FIELD_NUMBER: _ClassVar[int] WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] master_replicas: DistributedPyTorchTrainingReplicaSpec run_policy: _common_pb2.RunPolicy - success_policy: _common_pb2.SuccessPolicy worker_replicas: DistributedPyTorchTrainingReplicaSpec - def __init__(self, worker_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., master_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ..., success_policy: _Optional[_Union[_common_pb2.SuccessPolicy, str]] = ...) -> None: ... 
+ def __init__(self, worker_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., master_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ...) -> None: ... diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.py b/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.py index 2bb7f12d8..62f8fef2d 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.py +++ b/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.py @@ -11,10 +11,11 @@ _sym_db = _symbol_database.Default() +from flyteidl.core import tasks_pb2 as flyteidl_dot_core_dot_tasks__pb2 from flyteidl.plugins.kubeflow import common_pb2 as flyteidl_dot_plugins_dot_kubeflow_dot_common__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*flyteidl/plugins/kubeflow/tensorflow.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a&flyteidl/plugins/kubeflow/common.proto\"\xf9\x03\n!DistributedTensorflowTrainingTask\x12l\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\x0eworkerReplicas\x12\x64\n\x0bps_replicas\x18\x02 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\npsReplicas\x12j\n\x0e\x63hief_replicas\x18\x03 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\rchiefReplicas\x12\x43\n\nrun_policy\x18\x04 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\x12O\n\x0esuccess_policy\x18\x05 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.SuccessPolicyR\rsuccessPolicy\"\xc3\x01\n(DistributedTensorflowTrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12*\n\x11pod_template_name\x18\x02 \x01(\tR\x0fpodTemplateName\x12O\n\x0erestart_policy\x18\x03 \x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xef\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0fTensorflowProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*flyteidl/plugins/kubeflow/tensorflow.proto\x12\x19\x66lyteidl.plugins.kubeflow\x1a\x19\x66lyteidl/core/tasks.proto\x1a&flyteidl/plugins/kubeflow/common.proto\"\xa8\x03\n!DistributedTensorflowTrainingTask\x12l\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\x0eworkerReplicas\x12\x64\n\x0bps_replicas\x18\x02 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\npsReplicas\x12j\n\x0e\x63hief_replicas\x18\x03 \x01(\x0b\x32\x43.flyteidl.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\rchiefReplicas\x12\x43\n\nrun_policy\x18\x04 \x01(\x0b\x32$.flyteidl.plugins.kubeflow.RunPolicyR\trunPolicy\"\xe5\x01\n(DistributedTensorflowTrainingReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12\x14\n\x05image\x18\x02 \x01(\tR\x05image\x12\x36\n\tresources\x18\x03 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x0erestart_policy\x18\x04 
\x01(\x0e\x32(.flyteidl.plugins.kubeflow.RestartPolicyR\rrestartPolicyB\xef\x01\n\x1d\x63om.flyteidl.plugins.kubeflowB\x0fTensorflowProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PK\xaa\x02\x19\x46lyteidl.Plugins.Kubeflow\xca\x02\x19\x46lyteidl\\Plugins\\Kubeflow\xe2\x02%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1b\x46lyteidl::Plugins::Kubeflowb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.kubeflow.tensorflow_pb2', globals()) @@ -22,8 +23,8 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\035com.flyteidl.plugins.kubeflowB\017TensorflowProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPK\252\002\031Flyteidl.Plugins.Kubeflow\312\002\031Flyteidl\\Plugins\\Kubeflow\342\002%Flyteidl\\Plugins\\Kubeflow\\GPBMetadata\352\002\033Flyteidl::Plugins::Kubeflow' - _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_start=114 - _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_end=619 - _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC._serialized_start=622 - _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC._serialized_end=817 + _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_start=141 + _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_end=565 + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC._serialized_start=568 + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC._serialized_end=797 # @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.pyi b/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.pyi index e1dce3f93..c4a44f502 100644 --- a/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.pyi +++ b/gen/pb_python/flyteidl/plugins/kubeflow/tensorflow_pb2.pyi @@ -1,3 +1,4 @@ +from flyteidl.core import tasks_pb2 as _tasks_pb2 from flyteidl.plugins.kubeflow import common_pb2 as _common_pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -6,25 +7,25 @@ from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Opti DESCRIPTOR: _descriptor.FileDescriptor class DistributedTensorflowTrainingReplicaSpec(_message.Message): - __slots__ = ["pod_template_name", "replicas", "restart_policy"] - POD_TEMPLATE_NAME_FIELD_NUMBER: _ClassVar[int] + __slots__ = ["image", "replicas", "resources", "restart_policy"] + IMAGE_FIELD_NUMBER: _ClassVar[int] REPLICAS_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] - pod_template_name: str + image: str replicas: int + resources: _tasks_pb2.Resources restart_policy: _common_pb2.RestartPolicy - def __init__(self, replicas: _Optional[int] = ..., pod_template_name: _Optional[str] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ...) -> None: ... 
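Mirroring the Java changes earlier in the patch, the kubeflow replica specs now carry an image and a core Resources message instead of pod_template_name, with restart_policy moved to field 4. A minimal sketch against the regenerated stubs, assuming flyteidl.core.Resources keeps its usual requests/limits ResourceEntry shape; the image name and sizes are placeholders.

from flyteidl.core import tasks_pb2
from flyteidl.plugins.kubeflow import common_pb2, tensorflow_pb2

# One worker group with explicit image, resources, and restart policy.
worker = tensorflow_pb2.DistributedTensorflowTrainingReplicaSpec(
    replicas=2,
    image="ghcr.io/example/tf-trainer:latest",  # placeholder image
    resources=tasks_pb2.Resources(
        requests=[tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.CPU, value="1")],
        limits=[tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.MEMORY, value="2Gi")],
    ),
    restart_policy=common_pb2.RESTART_POLICY_ON_FAILURE,
)

# The task message no longer takes success_policy; only run_policy remains.
task = tensorflow_pb2.DistributedTensorflowTrainingTask(
    worker_replicas=worker,
    run_policy=common_pb2.RunPolicy(clean_pod_policy=common_pb2.CLEANPOD_POLICY_ALL),
)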
class DistributedTensorflowTrainingTask(_message.Message): - __slots__ = ["chief_replicas", "ps_replicas", "run_policy", "success_policy", "worker_replicas"] + __slots__ = ["chief_replicas", "ps_replicas", "run_policy", "worker_replicas"] CHIEF_REPLICAS_FIELD_NUMBER: _ClassVar[int] PS_REPLICAS_FIELD_NUMBER: _ClassVar[int] RUN_POLICY_FIELD_NUMBER: _ClassVar[int] - SUCCESS_POLICY_FIELD_NUMBER: _ClassVar[int] WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] chief_replicas: DistributedTensorflowTrainingReplicaSpec ps_replicas: DistributedTensorflowTrainingReplicaSpec run_policy: _common_pb2.RunPolicy - success_policy: _common_pb2.SuccessPolicy worker_replicas: DistributedTensorflowTrainingReplicaSpec - def __init__(self, worker_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., ps_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., chief_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ..., success_policy: _Optional[_Union[_common_pb2.SuccessPolicy, str]] = ...) -> None: ... + def __init__(self, worker_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., ps_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., chief_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2.RunPolicy, _Mapping]] = ...) -> None: ... diff --git a/gen/pb_python/flyteidl/plugins/mpi_pb2.py b/gen/pb_python/flyteidl/plugins/mpi_pb2.py new file mode 100644 index 000000000..6cdb20189 --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/mpi_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl/plugins/mpi.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/plugins/mpi.proto\x12\x10\x66lyteidl.plugins\"\x87\x01\n\x1a\x44istributedMPITrainingTask\x12\x1f\n\x0bnum_workers\x18\x01 \x01(\x05R\nnumWorkers\x12\x32\n\x15num_launcher_replicas\x18\x02 \x01(\x05R\x13numLauncherReplicas\x12\x14\n\x05slots\x18\x03 \x01(\x05R\x05slotsB\xba\x01\n\x14\x63om.flyteidl.pluginsB\x08MpiProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PX\xaa\x02\x10\x46lyteidl.Plugins\xca\x02\x10\x46lyteidl\\Plugins\xe2\x02\x1c\x46lyteidl\\Plugins\\GPBMetadata\xea\x02\x11\x46lyteidl::Pluginsb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.mpi_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\024com.flyteidl.pluginsB\010MpiProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPX\252\002\020Flyteidl.Plugins\312\002\020Flyteidl\\Plugins\342\002\034Flyteidl\\Plugins\\GPBMetadata\352\002\021Flyteidl::Plugins' + _DISTRIBUTEDMPITRAININGTASK._serialized_start=49 + _DISTRIBUTEDMPITRAININGTASK._serialized_end=184 +# @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/mpi_pb2.pyi b/gen/pb_python/flyteidl/plugins/mpi_pb2.pyi new file mode 100644 index 000000000..d4e48f9b4 --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/mpi_pb2.pyi @@ -0,0 +1,15 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedMPITrainingTask(_message.Message): + __slots__ = ["num_launcher_replicas", "num_workers", "slots"] + NUM_LAUNCHER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + NUM_WORKERS_FIELD_NUMBER: _ClassVar[int] + SLOTS_FIELD_NUMBER: _ClassVar[int] + num_launcher_replicas: int + num_workers: int + slots: int + def __init__(self, num_workers: _Optional[int] = ..., num_launcher_replicas: _Optional[int] = ..., slots: _Optional[int] = ...) -> None: ... diff --git a/gen/pb_python/flyteidl/plugins/mpi_pb2_grpc.py b/gen/pb_python/flyteidl/plugins/mpi_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/mpi_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/pb_python/flyteidl/plugins/pytorch_pb2.py b/gen/pb_python/flyteidl/plugins/pytorch_pb2.py new file mode 100644 index 000000000..c2afe8cbb --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/pytorch_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl/plugins/pytorch.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl/plugins/pytorch.proto\x12\x10\x66lyteidl.plugins\":\n\x1e\x44istributedPyTorchTrainingTask\x12\x18\n\x07workers\x18\x01 \x01(\x05R\x07workersB\xbe\x01\n\x14\x63om.flyteidl.pluginsB\x0cPytorchProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PX\xaa\x02\x10\x46lyteidl.Plugins\xca\x02\x10\x46lyteidl\\Plugins\xe2\x02\x1c\x46lyteidl\\Plugins\\GPBMetadata\xea\x02\x11\x46lyteidl::Pluginsb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.pytorch_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\024com.flyteidl.pluginsB\014PytorchProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPX\252\002\020Flyteidl.Plugins\312\002\020Flyteidl\\Plugins\342\002\034Flyteidl\\Plugins\\GPBMetadata\352\002\021Flyteidl::Plugins' + _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_start=52 + _DISTRIBUTEDPYTORCHTRAININGTASK._serialized_end=110 +# @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/pytorch_pb2.pyi b/gen/pb_python/flyteidl/plugins/pytorch_pb2.pyi new file mode 100644 index 000000000..c7f5cef66 --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/pytorch_pb2.pyi @@ -0,0 +1,11 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedPyTorchTrainingTask(_message.Message): + __slots__ = ["workers"] + WORKERS_FIELD_NUMBER: _ClassVar[int] + workers: int + def __init__(self, workers: _Optional[int] = ...) -> None: ... diff --git a/gen/pb_python/flyteidl/plugins/pytorch_pb2_grpc.py b/gen/pb_python/flyteidl/plugins/pytorch_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/pytorch_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/pb_python/flyteidl/plugins/tensorflow_pb2.py b/gen/pb_python/flyteidl/plugins/tensorflow_pb2.py new file mode 100644 index 000000000..5eb43278c --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/tensorflow_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl/plugins/tensorflow.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!flyteidl/plugins/tensorflow.proto\x12\x10\x66lyteidl.plugins\"\x85\x01\n!DistributedTensorflowTrainingTask\x12\x18\n\x07workers\x18\x01 \x01(\x05R\x07workers\x12\x1f\n\x0bps_replicas\x18\x02 \x01(\x05R\npsReplicas\x12%\n\x0e\x63hief_replicas\x18\x03 \x01(\x05R\rchiefReplicasB\xc1\x01\n\x14\x63om.flyteidl.pluginsB\x0fTensorflowProtoP\x01Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\xa2\x02\x03\x46PX\xaa\x02\x10\x46lyteidl.Plugins\xca\x02\x10\x46lyteidl\\Plugins\xe2\x02\x1c\x46lyteidl\\Plugins\\GPBMetadata\xea\x02\x11\x46lyteidl::Pluginsb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl.plugins.tensorflow_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\024com.flyteidl.pluginsB\017TensorflowProtoP\001Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins\242\002\003FPX\252\002\020Flyteidl.Plugins\312\002\020Flyteidl\\Plugins\342\002\034Flyteidl\\Plugins\\GPBMetadata\352\002\021Flyteidl::Plugins' + _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_start=56 + _DISTRIBUTEDTENSORFLOWTRAININGTASK._serialized_end=189 +# @@protoc_insertion_point(module_scope) diff --git a/gen/pb_python/flyteidl/plugins/tensorflow_pb2.pyi b/gen/pb_python/flyteidl/plugins/tensorflow_pb2.pyi new file mode 100644 index 000000000..8468f6c32 --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/tensorflow_pb2.pyi @@ -0,0 +1,15 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedTensorflowTrainingTask(_message.Message): + __slots__ = ["chief_replicas", "ps_replicas", "workers"] + CHIEF_REPLICAS_FIELD_NUMBER: _ClassVar[int] + PS_REPLICAS_FIELD_NUMBER: _ClassVar[int] + WORKERS_FIELD_NUMBER: _ClassVar[int] + chief_replicas: int + ps_replicas: int + workers: int + def __init__(self, workers: _Optional[int] = ..., ps_replicas: _Optional[int] = ..., chief_replicas: _Optional[int] = ...) -> None: ... diff --git a/gen/pb_python/flyteidl/plugins/tensorflow_pb2_grpc.py b/gen/pb_python/flyteidl/plugins/tensorflow_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/gen/pb_python/flyteidl/plugins/tensorflow_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/protos/flyteidl/plugins/kubeflow/common.proto b/protos/flyteidl/plugins/kubeflow/common.proto index 676c3b2b4..667cf444d 100644 --- a/protos/flyteidl/plugins/kubeflow/common.proto +++ b/protos/flyteidl/plugins/kubeflow/common.proto @@ -6,31 +6,29 @@ option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; message RunPolicy { - // CleanPodPolicy defines the policy to kill pods after the job completes. - // Default to None. 
+ // CleanPodPolicy defines the policy to kill pods after the job completes. Default to None. CleanPodPolicy clean_pod_policy = 1; - // TTL to clean up jobs. It may take extra ReconcilePeriod seconds for the cleanup, since - // reconcile gets called periodically. Default to infinite. + + // TTL to clean up jobs. Default to infinite. int32 ttl_seconds_after_finished = 2; // Specifies the duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer. int32 activeDeadlineSeconds = 3; - + // Number of retries before marking this job failed. int32 backoff_limit = 4; } -enum SuccessPolicy { - SUCCESS_POLICY_DEFAULT = 0; - SUCCESS_POLICY_ALL_WORKERS = 1; -} - enum CleanPodPolicy { - CLEANPOD_POLICY_UNDEFINED = 0; - CLEANPOD_POLICY_ALL = 1; - CLEANPOD_POLICY_RUNNING = 2; - CLEANPOD_POLICY_NONE = 3; + // The All policy means all pods even completed pods will be deleted immediately when the job finishes. + CLEANPOD_POLICY_ALL = 0; + + // The Running policy means that only pods still running when a job completes (e.g. parameter servers) will be deleted immediately; completed pods will not be deleted so that the logs will be preserved. This is the default value. + CLEANPOD_POLICY_RUNNING = 1; + + // The None policy means that no pods will be deleted when the job completes. + CLEANPOD_POLICY_NONE = 2; } enum RestartPolicy { diff --git a/protos/flyteidl/plugins/kubeflow/mpi.proto b/protos/flyteidl/plugins/kubeflow/mpi.proto index 7565c9aba..d0041a634 100644 --- a/protos/flyteidl/plugins/kubeflow/mpi.proto +++ b/protos/flyteidl/plugins/kubeflow/mpi.proto @@ -4,6 +4,7 @@ package flyteidl.plugins.kubeflow; option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; +import "flyteidl/core/tasks.proto"; import "flyteidl/plugins/kubeflow/common.proto"; // Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator @@ -18,19 +19,18 @@ message DistributedMPITrainingTask { // job, for example how to clean up resources and how long the job can stay // active. RunPolicy run_policy = 3; - - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. - SuccessPolicy success_policy = 4; } message DistributedMPITrainingReplicaSpec { - // Number of workers + // Number of replicas int32 replicas = 1; - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - string pod_template_name = 2; + // Image used for the replica group + string image = 2; - // Restart policy for the worker - RestartPolicy restart_policy = 3; + // Resources required for the replica group + core.Resources resources = 3; + + // RestartPolicy Determines whether pods will be restarted when they exit. 
The allowed values are as follows: + RestartPolicy restart_policy = 4; } \ No newline at end of file diff --git a/protos/flyteidl/plugins/kubeflow/pytorch.proto b/protos/flyteidl/plugins/kubeflow/pytorch.proto index a45eb71a9..a452b67d5 100644 --- a/protos/flyteidl/plugins/kubeflow/pytorch.proto +++ b/protos/flyteidl/plugins/kubeflow/pytorch.proto @@ -4,6 +4,7 @@ package flyteidl.plugins.kubeflow; option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; +import "flyteidl/core/tasks.proto"; import "flyteidl/plugins/kubeflow/common.proto"; // Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator @@ -18,19 +19,18 @@ message DistributedPyTorchTrainingTask { // job, for example how to clean up resources and how long the job can stay // active. RunPolicy run_policy = 3; - - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. - SuccessPolicy success_policy = 4; } message DistributedPyTorchTrainingReplicaSpec { - // Number of workers + // Number of replicas int32 replicas = 1; - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - string pod_template_name = 2; + // Image used for the replica group + string image = 2; - // Restart policy for the worker - RestartPolicy restart_policy = 3; + // Resources required for the replica group + core.Resources resources = 3; + + // RestartPolicy Determines whether pods will be restarted when they exit. The allowed values are as follows: + RestartPolicy restart_policy = 4; } \ No newline at end of file diff --git a/protos/flyteidl/plugins/kubeflow/tensorflow.proto b/protos/flyteidl/plugins/kubeflow/tensorflow.proto index 3732d1785..d7e38b4c7 100644 --- a/protos/flyteidl/plugins/kubeflow/tensorflow.proto +++ b/protos/flyteidl/plugins/kubeflow/tensorflow.proto @@ -4,6 +4,7 @@ package flyteidl.plugins.kubeflow; option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; +import "flyteidl/core/tasks.proto"; import "flyteidl/plugins/kubeflow/common.proto"; // Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator @@ -21,19 +22,18 @@ message DistributedTensorflowTrainingTask { // job, for example how to clean up resources and how long the job can stay // active. RunPolicy run_policy = 4; - - // SuccessPolicy defines the policy to mark the TFJob as succeeded. Default to None. - SuccessPolicy success_policy = 5; } message DistributedTensorflowTrainingReplicaSpec { - // Number of workers + // Number of replicas int32 replicas = 1; - // Unique name of a PodTemplate k8s resource to be used as the base configuration. - // PodTemplate specified here will be overriden by the pod template specified at the task metedata level. - string pod_template_name = 2; + // Image used for the replica group + string image = 2; - // Restart policy for the worker - RestartPolicy restart_policy = 3; + // Resources required for the replica group + core.Resources resources = 3; + + // RestartPolicy Determines whether pods will be restarted when they exit. 
The allowed values are as follows: + RestartPolicy restart_policy = 4; } \ No newline at end of file diff --git a/protos/flyteidl/plugins/mpi.proto b/protos/flyteidl/plugins/mpi.proto new file mode 100644 index 000000000..8467d3de0 --- /dev/null +++ b/protos/flyteidl/plugins/mpi.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; + +// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +message DistributedMPITrainingTask { + // number of worker spawned in the cluster for this job + int32 num_workers = 1; + + // number of launcher replicas spawned in the cluster for this job + // The launcher pod invokes mpirun and communicates with worker pods through MPI. + int32 num_launcher_replicas = 2; + + // number of slots per worker used in hostfile. + // The available slots (GPUs) in each pod. + int32 slots = 3; +} \ No newline at end of file diff --git a/protos/flyteidl/plugins/pytorch.proto b/protos/flyteidl/plugins/pytorch.proto new file mode 100644 index 000000000..603de00c3 --- /dev/null +++ b/protos/flyteidl/plugins/pytorch.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator +message DistributedPyTorchTrainingTask { + // number of worker replicas spawned in the cluster for this job + int32 workers = 1; +} diff --git a/protos/flyteidl/plugins/tensorflow.proto b/protos/flyteidl/plugins/tensorflow.proto new file mode 100644 index 000000000..a24f871de --- /dev/null +++ b/protos/flyteidl/plugins/tensorflow.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +message DistributedTensorflowTrainingTask { + // number of worker, ps, chief replicas spawned in the cluster for this job + int32 workers = 1; + // PS -> Parameter server + int32 ps_replicas = 2; + int32 chief_replicas = 3; +}
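For reference, a minimal round-trip sketch of the restored flyteidl.plugins messages, assuming the regenerated gen/pb_python stubs are on the import path (the replica counts are arbitrary example values); this is the backward compatibility the re-added protos are meant to preserve:

from flyteidl.plugins import mpi_pb2, pytorch_pb2, tensorflow_pb2

# Legacy-shaped task configs, as producers emitted them before the kubeflow protos existed.
mpi = mpi_pb2.DistributedMPITrainingTask(num_workers=4, num_launcher_replicas=1, slots=1)
torch = pytorch_pb2.DistributedPyTorchTrainingTask(workers=8)
tf = tensorflow_pb2.DistributedTensorflowTrainingTask(workers=4, ps_replicas=1, chief_replicas=1)

# Round-trip through the wire format; each message parses back equal to what was written.
for msg in (mpi, torch, tf):
    assert type(msg).FromString(msg.SerializeToString()) == msg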