diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8452cea078..c94a9c1c78 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -59,6 +59,12 @@ jobs:
         with:
           workspaces: nexus
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - name: cargo check
         run: cargo check
         working-directory: ./nexus
diff --git a/.github/workflows/customer-docker.yml b/.github/workflows/customer-docker.yml
index e710c7d860..6d9eb4dbdc 100644
--- a/.github/workflows/customer-docker.yml
+++ b/.github/workflows/customer-docker.yml
@@ -23,6 +23,12 @@ jobs:
         with:
           submodules: recursive
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - uses: depot/setup-action@v1
 
       - name: Login to GitHub Container Registry
diff --git a/.github/workflows/dev-docker.yml b/.github/workflows/dev-docker.yml
index 1dd4d9d5b6..0795fa85f3 100644
--- a/.github/workflows/dev-docker.yml
+++ b/.github/workflows/dev-docker.yml
@@ -22,6 +22,12 @@ jobs:
         with:
           submodules: recursive
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - uses: depot/setup-action@v1
 
       - name: Login to GitHub Container Registry
diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml
index d0b0837c38..156105ed91 100644
--- a/.github/workflows/flow.yml
+++ b/.github/workflows/flow.yml
@@ -32,6 +32,12 @@ jobs:
       - name: checkout sources
        uses: actions/checkout@v4
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - uses: actions/setup-go@v4
         with:
           go-version: ">=1.21.0"
diff --git a/.github/workflows/golang-lint.yml b/.github/workflows/golang-lint.yml
index 5b4b31441d..1216a69c3b 100644
--- a/.github/workflows/golang-lint.yml
+++ b/.github/workflows/golang-lint.yml
@@ -21,6 +21,12 @@ jobs:
         with:
           submodules: recursive
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - name: golangci-lint
         uses: reviewdog/action-golangci-lint@v2
         with:
diff --git a/.github/workflows/rust-lint.yml b/.github/workflows/rust-lint.yml
index c4a43ad791..b729cb2cb4 100644
--- a/.github/workflows/rust-lint.yml
+++ b/.github/workflows/rust-lint.yml
@@ -21,6 +21,12 @@ jobs:
         with:
           submodules: recursive
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - uses: dtolnay/rust-toolchain@stable
         with:
           components: clippy
diff --git a/.github/workflows/stable-docker.yml b/.github/workflows/stable-docker.yml
index f4b7c9e9a1..822a88b67f 100644
--- a/.github/workflows/stable-docker.yml
+++ b/.github/workflows/stable-docker.yml
@@ -20,6 +20,12 @@ jobs:
         with:
           submodules: recursive
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - uses: depot/setup-action@v1
 
       - name: Login to GitHub Container Registry
diff --git a/.github/workflows/ui-build.yml b/.github/workflows/ui-build.yml
index 752bb9bd7f..74bc9e3348 100644
--- a/.github/workflows/ui-build.yml
+++ b/.github/workflows/ui-build.yml
@@ -18,6 +18,12 @@ jobs:
      - name: checkout
        uses: actions/checkout@v4
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - name: Install Node.js dependencies
         working-directory: ui
         run: npm ci
diff --git a/.github/workflows/ui-lint.yml b/.github/workflows/ui-lint.yml
index 48428e1572..b8cc8abe08 100644
--- a/.github/workflows/ui-lint.yml
+++ b/.github/workflows/ui-lint.yml
@@ -22,6 +22,12 @@ jobs:
      - name: checkout
        uses: actions/checkout@v4
 
+      - uses: bufbuild/buf-setup-action@v1.28.1
+
+      - name: setup protos
+        run: |
+          ./generate_protos.sh
+
       - name: Install Node.js dependencies
         working-directory: ui
         run: npm ci
diff --git a/.gitignore b/.gitignore
index c8909ff74a..c2b1468c78 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,7 @@ nexus/server/tests/results/actual/
 
 go.work
 go.work.sum
+
+# generated protobuf files
+ui/grpc_generated
+flow/generated
diff --git a/flow/generated/protos/flow.pb.go b/flow/generated/protos/flow.pb.go
deleted file mode 100644
index deffa53986..0000000000
--- a/flow/generated/protos/flow.pb.go
+++ /dev/null
@@ -1,5075 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// 	protoc-gen-go v1.31.0
-// 	protoc (unknown)
-// source: flow.proto
-
-package protos
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-	reflect "reflect"
-	sync "sync"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
-	// Verify that runtime/protoimpl is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// protos for qrep
-type QRepSyncMode int32
-
-const (
-	QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT QRepSyncMode = 0
-	QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO QRepSyncMode = 1
-)
-
-// Enum value maps for QRepSyncMode.
-var (
-	QRepSyncMode_name = map[int32]string{
-		0: "QREP_SYNC_MODE_MULTI_INSERT",
-		1: "QREP_SYNC_MODE_STORAGE_AVRO",
-	}
-	QRepSyncMode_value = map[string]int32{
-		"QREP_SYNC_MODE_MULTI_INSERT": 0,
-		"QREP_SYNC_MODE_STORAGE_AVRO": 1,
-	}
-)
-
-func (x QRepSyncMode) Enum() *QRepSyncMode {
-	p := new(QRepSyncMode)
-	*p = x
-	return p
-}
-
-func (x QRepSyncMode) String() string {
-	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (QRepSyncMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_flow_proto_enumTypes[0].Descriptor()
-}
-
-func (QRepSyncMode) Type() protoreflect.EnumType {
-	return &file_flow_proto_enumTypes[0]
-}
-
-func (x QRepSyncMode) Number() protoreflect.EnumNumber {
-	return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use QRepSyncMode.Descriptor instead.
-func (QRepSyncMode) EnumDescriptor() ([]byte, []int) {
-	return file_flow_proto_rawDescGZIP(), []int{0}
-}
-
-type QRepWriteType int32
-
-const (
-	QRepWriteType_QREP_WRITE_MODE_APPEND QRepWriteType = 0
-	QRepWriteType_QREP_WRITE_MODE_UPSERT QRepWriteType = 1
-	// only valid when initial_copy_true is set to true. TRUNCATES tables before reverting to APPEND.
-	QRepWriteType_QREP_WRITE_MODE_OVERWRITE QRepWriteType = 2
-)
-
-// Enum value maps for QRepWriteType.
-var ( - QRepWriteType_name = map[int32]string{ - 0: "QREP_WRITE_MODE_APPEND", - 1: "QREP_WRITE_MODE_UPSERT", - 2: "QREP_WRITE_MODE_OVERWRITE", - } - QRepWriteType_value = map[string]int32{ - "QREP_WRITE_MODE_APPEND": 0, - "QREP_WRITE_MODE_UPSERT": 1, - "QREP_WRITE_MODE_OVERWRITE": 2, - } -) - -func (x QRepWriteType) Enum() *QRepWriteType { - p := new(QRepWriteType) - *p = x - return p -} - -func (x QRepWriteType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (QRepWriteType) Descriptor() protoreflect.EnumDescriptor { - return file_flow_proto_enumTypes[1].Descriptor() -} - -func (QRepWriteType) Type() protoreflect.EnumType { - return &file_flow_proto_enumTypes[1] -} - -func (x QRepWriteType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use QRepWriteType.Descriptor instead. -func (QRepWriteType) EnumDescriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{1} -} - -type TableNameMapping struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SourceTableName string `protobuf:"bytes,1,opt,name=source_table_name,json=sourceTableName,proto3" json:"source_table_name,omitempty"` - DestinationTableName string `protobuf:"bytes,2,opt,name=destination_table_name,json=destinationTableName,proto3" json:"destination_table_name,omitempty"` -} - -func (x *TableNameMapping) Reset() { - *x = TableNameMapping{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableNameMapping) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableNameMapping) ProtoMessage() {} - -func (x *TableNameMapping) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableNameMapping.ProtoReflect.Descriptor instead. 
-func (*TableNameMapping) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{0} -} - -func (x *TableNameMapping) GetSourceTableName() string { - if x != nil { - return x.SourceTableName - } - return "" -} - -func (x *TableNameMapping) GetDestinationTableName() string { - if x != nil { - return x.DestinationTableName - } - return "" -} - -type RelationMessageColumn struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Flags uint32 `protobuf:"varint,1,opt,name=flags,proto3" json:"flags,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - DataType uint32 `protobuf:"varint,3,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"` -} - -func (x *RelationMessageColumn) Reset() { - *x = RelationMessageColumn{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RelationMessageColumn) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RelationMessageColumn) ProtoMessage() {} - -func (x *RelationMessageColumn) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RelationMessageColumn.ProtoReflect.Descriptor instead. -func (*RelationMessageColumn) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{1} -} - -func (x *RelationMessageColumn) GetFlags() uint32 { - if x != nil { - return x.Flags - } - return 0 -} - -func (x *RelationMessageColumn) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *RelationMessageColumn) GetDataType() uint32 { - if x != nil { - return x.DataType - } - return 0 -} - -type RelationMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RelationId uint32 `protobuf:"varint,1,opt,name=relation_id,json=relationId,proto3" json:"relation_id,omitempty"` - RelationName string `protobuf:"bytes,2,opt,name=relation_name,json=relationName,proto3" json:"relation_name,omitempty"` - Columns []*RelationMessageColumn `protobuf:"bytes,3,rep,name=columns,proto3" json:"columns,omitempty"` -} - -func (x *RelationMessage) Reset() { - *x = RelationMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RelationMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RelationMessage) ProtoMessage() {} - -func (x *RelationMessage) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RelationMessage.ProtoReflect.Descriptor instead. 
-func (*RelationMessage) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{2} -} - -func (x *RelationMessage) GetRelationId() uint32 { - if x != nil { - return x.RelationId - } - return 0 -} - -func (x *RelationMessage) GetRelationName() string { - if x != nil { - return x.RelationName - } - return "" -} - -func (x *RelationMessage) GetColumns() []*RelationMessageColumn { - if x != nil { - return x.Columns - } - return nil -} - -type TableMapping struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SourceTableIdentifier string `protobuf:"bytes,1,opt,name=source_table_identifier,json=sourceTableIdentifier,proto3" json:"source_table_identifier,omitempty"` - DestinationTableIdentifier string `protobuf:"bytes,2,opt,name=destination_table_identifier,json=destinationTableIdentifier,proto3" json:"destination_table_identifier,omitempty"` - PartitionKey string `protobuf:"bytes,3,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"` - Exclude []string `protobuf:"bytes,4,rep,name=exclude,proto3" json:"exclude,omitempty"` -} - -func (x *TableMapping) Reset() { - *x = TableMapping{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableMapping) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableMapping) ProtoMessage() {} - -func (x *TableMapping) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableMapping.ProtoReflect.Descriptor instead. -func (*TableMapping) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{3} -} - -func (x *TableMapping) GetSourceTableIdentifier() string { - if x != nil { - return x.SourceTableIdentifier - } - return "" -} - -func (x *TableMapping) GetDestinationTableIdentifier() string { - if x != nil { - return x.DestinationTableIdentifier - } - return "" -} - -func (x *TableMapping) GetPartitionKey() string { - if x != nil { - return x.PartitionKey - } - return "" -} - -func (x *TableMapping) GetExclude() []string { - if x != nil { - return x.Exclude - } - return nil -} - -type SetupInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Peer *Peer `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` - FlowName string `protobuf:"bytes,2,opt,name=flow_name,json=flowName,proto3" json:"flow_name,omitempty"` -} - -func (x *SetupInput) Reset() { - *x = SetupInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupInput) ProtoMessage() {} - -func (x *SetupInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupInput.ProtoReflect.Descriptor instead. 
-func (*SetupInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{4} -} - -func (x *SetupInput) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -func (x *SetupInput) GetFlowName() string { - if x != nil { - return x.FlowName - } - return "" -} - -type FlowConnectionConfigs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *Peer `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Destination *Peer `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` - FlowJobName string `protobuf:"bytes,3,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - TableSchema *TableSchema `protobuf:"bytes,4,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"` - TableMappings []*TableMapping `protobuf:"bytes,5,rep,name=table_mappings,json=tableMappings,proto3" json:"table_mappings,omitempty"` - SrcTableIdNameMapping map[uint32]string `protobuf:"bytes,6,rep,name=src_table_id_name_mapping,json=srcTableIdNameMapping,proto3" json:"src_table_id_name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TableNameSchemaMapping map[string]*TableSchema `protobuf:"bytes,7,rep,name=table_name_schema_mapping,json=tableNameSchemaMapping,proto3" json:"table_name_schema_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // This is an optional peer that will be used to hold metadata in cases where - // the destination isn't ideal for holding metadata. - MetadataPeer *Peer `protobuf:"bytes,8,opt,name=metadata_peer,json=metadataPeer,proto3" json:"metadata_peer,omitempty"` - MaxBatchSize uint32 `protobuf:"varint,9,opt,name=max_batch_size,json=maxBatchSize,proto3" json:"max_batch_size,omitempty"` - DoInitialCopy bool `protobuf:"varint,10,opt,name=do_initial_copy,json=doInitialCopy,proto3" json:"do_initial_copy,omitempty"` - PublicationName string `protobuf:"bytes,11,opt,name=publication_name,json=publicationName,proto3" json:"publication_name,omitempty"` - SnapshotNumRowsPerPartition uint32 `protobuf:"varint,12,opt,name=snapshot_num_rows_per_partition,json=snapshotNumRowsPerPartition,proto3" json:"snapshot_num_rows_per_partition,omitempty"` - // max parallel workers is per table - SnapshotMaxParallelWorkers uint32 `protobuf:"varint,13,opt,name=snapshot_max_parallel_workers,json=snapshotMaxParallelWorkers,proto3" json:"snapshot_max_parallel_workers,omitempty"` - SnapshotNumTablesInParallel uint32 `protobuf:"varint,14,opt,name=snapshot_num_tables_in_parallel,json=snapshotNumTablesInParallel,proto3" json:"snapshot_num_tables_in_parallel,omitempty"` - SnapshotSyncMode QRepSyncMode `protobuf:"varint,15,opt,name=snapshot_sync_mode,json=snapshotSyncMode,proto3,enum=peerdb_flow.QRepSyncMode" json:"snapshot_sync_mode,omitempty"` - CdcSyncMode QRepSyncMode `protobuf:"varint,16,opt,name=cdc_sync_mode,json=cdcSyncMode,proto3,enum=peerdb_flow.QRepSyncMode" json:"cdc_sync_mode,omitempty"` - SnapshotStagingPath string `protobuf:"bytes,17,opt,name=snapshot_staging_path,json=snapshotStagingPath,proto3" json:"snapshot_staging_path,omitempty"` - CdcStagingPath string `protobuf:"bytes,18,opt,name=cdc_staging_path,json=cdcStagingPath,proto3" json:"cdc_staging_path,omitempty"` - // currently only works for snowflake - SoftDelete bool `protobuf:"varint,19,opt,name=soft_delete,json=softDelete,proto3" 
json:"soft_delete,omitempty"` - ReplicationSlotName string `protobuf:"bytes,20,opt,name=replication_slot_name,json=replicationSlotName,proto3" json:"replication_slot_name,omitempty"` - // the below two are for eventhub only - PushBatchSize int64 `protobuf:"varint,21,opt,name=push_batch_size,json=pushBatchSize,proto3" json:"push_batch_size,omitempty"` - PushParallelism int64 `protobuf:"varint,22,opt,name=push_parallelism,json=pushParallelism,proto3" json:"push_parallelism,omitempty"` - // if true, then the flow will be resynced - // create new tables with "_resync" suffix, perform initial load and then swap the new tables with the old ones - // to be used after the old mirror is dropped - Resync bool `protobuf:"varint,23,opt,name=resync,proto3" json:"resync,omitempty"` - SoftDeleteColName string `protobuf:"bytes,24,opt,name=soft_delete_col_name,json=softDeleteColName,proto3" json:"soft_delete_col_name,omitempty"` - SyncedAtColName string `protobuf:"bytes,25,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` - InitialCopyOnly bool `protobuf:"varint,26,opt,name=initial_copy_only,json=initialCopyOnly,proto3" json:"initial_copy_only,omitempty"` -} - -func (x *FlowConnectionConfigs) Reset() { - *x = FlowConnectionConfigs{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FlowConnectionConfigs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FlowConnectionConfigs) ProtoMessage() {} - -func (x *FlowConnectionConfigs) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FlowConnectionConfigs.ProtoReflect.Descriptor instead. 
-func (*FlowConnectionConfigs) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{5} -} - -func (x *FlowConnectionConfigs) GetSource() *Peer { - if x != nil { - return x.Source - } - return nil -} - -func (x *FlowConnectionConfigs) GetDestination() *Peer { - if x != nil { - return x.Destination - } - return nil -} - -func (x *FlowConnectionConfigs) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *FlowConnectionConfigs) GetTableSchema() *TableSchema { - if x != nil { - return x.TableSchema - } - return nil -} - -func (x *FlowConnectionConfigs) GetTableMappings() []*TableMapping { - if x != nil { - return x.TableMappings - } - return nil -} - -func (x *FlowConnectionConfigs) GetSrcTableIdNameMapping() map[uint32]string { - if x != nil { - return x.SrcTableIdNameMapping - } - return nil -} - -func (x *FlowConnectionConfigs) GetTableNameSchemaMapping() map[string]*TableSchema { - if x != nil { - return x.TableNameSchemaMapping - } - return nil -} - -func (x *FlowConnectionConfigs) GetMetadataPeer() *Peer { - if x != nil { - return x.MetadataPeer - } - return nil -} - -func (x *FlowConnectionConfigs) GetMaxBatchSize() uint32 { - if x != nil { - return x.MaxBatchSize - } - return 0 -} - -func (x *FlowConnectionConfigs) GetDoInitialCopy() bool { - if x != nil { - return x.DoInitialCopy - } - return false -} - -func (x *FlowConnectionConfigs) GetPublicationName() string { - if x != nil { - return x.PublicationName - } - return "" -} - -func (x *FlowConnectionConfigs) GetSnapshotNumRowsPerPartition() uint32 { - if x != nil { - return x.SnapshotNumRowsPerPartition - } - return 0 -} - -func (x *FlowConnectionConfigs) GetSnapshotMaxParallelWorkers() uint32 { - if x != nil { - return x.SnapshotMaxParallelWorkers - } - return 0 -} - -func (x *FlowConnectionConfigs) GetSnapshotNumTablesInParallel() uint32 { - if x != nil { - return x.SnapshotNumTablesInParallel - } - return 0 -} - -func (x *FlowConnectionConfigs) GetSnapshotSyncMode() QRepSyncMode { - if x != nil { - return x.SnapshotSyncMode - } - return QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT -} - -func (x *FlowConnectionConfigs) GetCdcSyncMode() QRepSyncMode { - if x != nil { - return x.CdcSyncMode - } - return QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT -} - -func (x *FlowConnectionConfigs) GetSnapshotStagingPath() string { - if x != nil { - return x.SnapshotStagingPath - } - return "" -} - -func (x *FlowConnectionConfigs) GetCdcStagingPath() string { - if x != nil { - return x.CdcStagingPath - } - return "" -} - -func (x *FlowConnectionConfigs) GetSoftDelete() bool { - if x != nil { - return x.SoftDelete - } - return false -} - -func (x *FlowConnectionConfigs) GetReplicationSlotName() string { - if x != nil { - return x.ReplicationSlotName - } - return "" -} - -func (x *FlowConnectionConfigs) GetPushBatchSize() int64 { - if x != nil { - return x.PushBatchSize - } - return 0 -} - -func (x *FlowConnectionConfigs) GetPushParallelism() int64 { - if x != nil { - return x.PushParallelism - } - return 0 -} - -func (x *FlowConnectionConfigs) GetResync() bool { - if x != nil { - return x.Resync - } - return false -} - -func (x *FlowConnectionConfigs) GetSoftDeleteColName() string { - if x != nil { - return x.SoftDeleteColName - } - return "" -} - -func (x *FlowConnectionConfigs) GetSyncedAtColName() string { - if x != nil { - return x.SyncedAtColName - } - return "" -} - -func (x *FlowConnectionConfigs) GetInitialCopyOnly() bool { - if x != nil { - return x.InitialCopyOnly - } - return 
false -} - -type RenameTableOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CurrentName string `protobuf:"bytes,1,opt,name=current_name,json=currentName,proto3" json:"current_name,omitempty"` - NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` - TableSchema *TableSchema `protobuf:"bytes,3,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"` -} - -func (x *RenameTableOption) Reset() { - *x = RenameTableOption{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RenameTableOption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RenameTableOption) ProtoMessage() {} - -func (x *RenameTableOption) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RenameTableOption.ProtoReflect.Descriptor instead. -func (*RenameTableOption) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{6} -} - -func (x *RenameTableOption) GetCurrentName() string { - if x != nil { - return x.CurrentName - } - return "" -} - -func (x *RenameTableOption) GetNewName() string { - if x != nil { - return x.NewName - } - return "" -} - -func (x *RenameTableOption) GetTableSchema() *TableSchema { - if x != nil { - return x.TableSchema - } - return nil -} - -type RenameTablesInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3" json:"peer,omitempty"` - RenameTableOptions []*RenameTableOption `protobuf:"bytes,3,rep,name=rename_table_options,json=renameTableOptions,proto3" json:"rename_table_options,omitempty"` - SoftDeleteColName *string `protobuf:"bytes,4,opt,name=soft_delete_col_name,json=softDeleteColName,proto3,oneof" json:"soft_delete_col_name,omitempty"` - SyncedAtColName *string `protobuf:"bytes,5,opt,name=synced_at_col_name,json=syncedAtColName,proto3,oneof" json:"synced_at_col_name,omitempty"` -} - -func (x *RenameTablesInput) Reset() { - *x = RenameTablesInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RenameTablesInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RenameTablesInput) ProtoMessage() {} - -func (x *RenameTablesInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RenameTablesInput.ProtoReflect.Descriptor instead. 
-func (*RenameTablesInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{7} -} - -func (x *RenameTablesInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *RenameTablesInput) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -func (x *RenameTablesInput) GetRenameTableOptions() []*RenameTableOption { - if x != nil { - return x.RenameTableOptions - } - return nil -} - -func (x *RenameTablesInput) GetSoftDeleteColName() string { - if x != nil && x.SoftDeleteColName != nil { - return *x.SoftDeleteColName - } - return "" -} - -func (x *RenameTablesInput) GetSyncedAtColName() string { - if x != nil && x.SyncedAtColName != nil { - return *x.SyncedAtColName - } - return "" -} - -type RenameTablesOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` -} - -func (x *RenameTablesOutput) Reset() { - *x = RenameTablesOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RenameTablesOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RenameTablesOutput) ProtoMessage() {} - -func (x *RenameTablesOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RenameTablesOutput.ProtoReflect.Descriptor instead. -func (*RenameTablesOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{8} -} - -func (x *RenameTablesOutput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -type CreateTablesFromExistingInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3" json:"peer,omitempty"` - NewToExistingTableMapping map[string]string `protobuf:"bytes,3,rep,name=new_to_existing_table_mapping,json=newToExistingTableMapping,proto3" json:"new_to_existing_table_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *CreateTablesFromExistingInput) Reset() { - *x = CreateTablesFromExistingInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTablesFromExistingInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTablesFromExistingInput) ProtoMessage() {} - -func (x *CreateTablesFromExistingInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTablesFromExistingInput.ProtoReflect.Descriptor instead. 
-func (*CreateTablesFromExistingInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{9} -} - -func (x *CreateTablesFromExistingInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *CreateTablesFromExistingInput) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -func (x *CreateTablesFromExistingInput) GetNewToExistingTableMapping() map[string]string { - if x != nil { - return x.NewToExistingTableMapping - } - return nil -} - -type CreateTablesFromExistingOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` -} - -func (x *CreateTablesFromExistingOutput) Reset() { - *x = CreateTablesFromExistingOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTablesFromExistingOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTablesFromExistingOutput) ProtoMessage() {} - -func (x *CreateTablesFromExistingOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTablesFromExistingOutput.ProtoReflect.Descriptor instead. -func (*CreateTablesFromExistingOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{10} -} - -func (x *CreateTablesFromExistingOutput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -type SyncFlowOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BatchSize int32 `protobuf:"varint,1,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` - RelationMessageMapping map[uint32]*RelationMessage `protobuf:"bytes,2,rep,name=relation_message_mapping,json=relationMessageMapping,proto3" json:"relation_message_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *SyncFlowOptions) Reset() { - *x = SyncFlowOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SyncFlowOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SyncFlowOptions) ProtoMessage() {} - -func (x *SyncFlowOptions) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SyncFlowOptions.ProtoReflect.Descriptor instead. 
-func (*SyncFlowOptions) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{11} -} - -func (x *SyncFlowOptions) GetBatchSize() int32 { - if x != nil { - return x.BatchSize - } - return 0 -} - -func (x *SyncFlowOptions) GetRelationMessageMapping() map[uint32]*RelationMessage { - if x != nil { - return x.RelationMessageMapping - } - return nil -} - -type NormalizeFlowOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BatchSize int32 `protobuf:"varint,1,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` -} - -func (x *NormalizeFlowOptions) Reset() { - *x = NormalizeFlowOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NormalizeFlowOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NormalizeFlowOptions) ProtoMessage() {} - -func (x *NormalizeFlowOptions) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NormalizeFlowOptions.ProtoReflect.Descriptor instead. -func (*NormalizeFlowOptions) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{12} -} - -func (x *NormalizeFlowOptions) GetBatchSize() int32 { - if x != nil { - return x.BatchSize - } - return 0 -} - -type LastSyncState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Checkpoint int64 `protobuf:"varint,1,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - LastSyncedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_synced_at,json=lastSyncedAt,proto3" json:"last_synced_at,omitempty"` -} - -func (x *LastSyncState) Reset() { - *x = LastSyncState{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LastSyncState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LastSyncState) ProtoMessage() {} - -func (x *LastSyncState) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LastSyncState.ProtoReflect.Descriptor instead. 
-func (*LastSyncState) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{13} -} - -func (x *LastSyncState) GetCheckpoint() int64 { - if x != nil { - return x.Checkpoint - } - return 0 -} - -func (x *LastSyncState) GetLastSyncedAt() *timestamppb.Timestamp { - if x != nil { - return x.LastSyncedAt - } - return nil -} - -type StartFlowInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LastSyncState *LastSyncState `protobuf:"bytes,1,opt,name=last_sync_state,json=lastSyncState,proto3" json:"last_sync_state,omitempty"` - FlowConnectionConfigs *FlowConnectionConfigs `protobuf:"bytes,2,opt,name=flow_connection_configs,json=flowConnectionConfigs,proto3" json:"flow_connection_configs,omitempty"` - SyncFlowOptions *SyncFlowOptions `protobuf:"bytes,3,opt,name=sync_flow_options,json=syncFlowOptions,proto3" json:"sync_flow_options,omitempty"` - RelationMessageMapping map[uint32]*RelationMessage `protobuf:"bytes,4,rep,name=relation_message_mapping,json=relationMessageMapping,proto3" json:"relation_message_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *StartFlowInput) Reset() { - *x = StartFlowInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartFlowInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartFlowInput) ProtoMessage() {} - -func (x *StartFlowInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartFlowInput.ProtoReflect.Descriptor instead. 
-func (*StartFlowInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{14} -} - -func (x *StartFlowInput) GetLastSyncState() *LastSyncState { - if x != nil { - return x.LastSyncState - } - return nil -} - -func (x *StartFlowInput) GetFlowConnectionConfigs() *FlowConnectionConfigs { - if x != nil { - return x.FlowConnectionConfigs - } - return nil -} - -func (x *StartFlowInput) GetSyncFlowOptions() *SyncFlowOptions { - if x != nil { - return x.SyncFlowOptions - } - return nil -} - -func (x *StartFlowInput) GetRelationMessageMapping() map[uint32]*RelationMessage { - if x != nil { - return x.RelationMessageMapping - } - return nil -} - -type StartNormalizeInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowConnectionConfigs *FlowConnectionConfigs `protobuf:"bytes,1,opt,name=flow_connection_configs,json=flowConnectionConfigs,proto3" json:"flow_connection_configs,omitempty"` -} - -func (x *StartNormalizeInput) Reset() { - *x = StartNormalizeInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartNormalizeInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartNormalizeInput) ProtoMessage() {} - -func (x *StartNormalizeInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartNormalizeInput.ProtoReflect.Descriptor instead. -func (*StartNormalizeInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{15} -} - -func (x *StartNormalizeInput) GetFlowConnectionConfigs() *FlowConnectionConfigs { - if x != nil { - return x.FlowConnectionConfigs - } - return nil -} - -type GetLastSyncedIDInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` -} - -func (x *GetLastSyncedIDInput) Reset() { - *x = GetLastSyncedIDInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetLastSyncedIDInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetLastSyncedIDInput) ProtoMessage() {} - -func (x *GetLastSyncedIDInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetLastSyncedIDInput.ProtoReflect.Descriptor instead. 
-func (*GetLastSyncedIDInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{16} -} - -func (x *GetLastSyncedIDInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *GetLastSyncedIDInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -type EnsurePullabilityInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - SourceTableIdentifier string `protobuf:"bytes,3,opt,name=source_table_identifier,json=sourceTableIdentifier,proto3" json:"source_table_identifier,omitempty"` -} - -func (x *EnsurePullabilityInput) Reset() { - *x = EnsurePullabilityInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnsurePullabilityInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnsurePullabilityInput) ProtoMessage() {} - -func (x *EnsurePullabilityInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnsurePullabilityInput.ProtoReflect.Descriptor instead. -func (*EnsurePullabilityInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{17} -} - -func (x *EnsurePullabilityInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *EnsurePullabilityInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *EnsurePullabilityInput) GetSourceTableIdentifier() string { - if x != nil { - return x.SourceTableIdentifier - } - return "" -} - -type EnsurePullabilityBatchInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - SourceTableIdentifiers []string `protobuf:"bytes,3,rep,name=source_table_identifiers,json=sourceTableIdentifiers,proto3" json:"source_table_identifiers,omitempty"` -} - -func (x *EnsurePullabilityBatchInput) Reset() { - *x = EnsurePullabilityBatchInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnsurePullabilityBatchInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnsurePullabilityBatchInput) ProtoMessage() {} - -func (x *EnsurePullabilityBatchInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - 
return mi.MessageOf(x) -} - -// Deprecated: Use EnsurePullabilityBatchInput.ProtoReflect.Descriptor instead. -func (*EnsurePullabilityBatchInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{18} -} - -func (x *EnsurePullabilityBatchInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *EnsurePullabilityBatchInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *EnsurePullabilityBatchInput) GetSourceTableIdentifiers() []string { - if x != nil { - return x.SourceTableIdentifiers - } - return nil -} - -type PostgresTableIdentifier struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RelId uint32 `protobuf:"varint,1,opt,name=rel_id,json=relId,proto3" json:"rel_id,omitempty"` -} - -func (x *PostgresTableIdentifier) Reset() { - *x = PostgresTableIdentifier{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PostgresTableIdentifier) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PostgresTableIdentifier) ProtoMessage() {} - -func (x *PostgresTableIdentifier) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PostgresTableIdentifier.ProtoReflect.Descriptor instead. -func (*PostgresTableIdentifier) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{19} -} - -func (x *PostgresTableIdentifier) GetRelId() uint32 { - if x != nil { - return x.RelId - } - return 0 -} - -type TableIdentifier struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TableIdentifier: - // - // *TableIdentifier_PostgresTableIdentifier - TableIdentifier isTableIdentifier_TableIdentifier `protobuf_oneof:"table_identifier"` -} - -func (x *TableIdentifier) Reset() { - *x = TableIdentifier{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableIdentifier) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableIdentifier) ProtoMessage() {} - -func (x *TableIdentifier) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableIdentifier.ProtoReflect.Descriptor instead. 
-func (*TableIdentifier) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{20} -} - -func (m *TableIdentifier) GetTableIdentifier() isTableIdentifier_TableIdentifier { - if m != nil { - return m.TableIdentifier - } - return nil -} - -func (x *TableIdentifier) GetPostgresTableIdentifier() *PostgresTableIdentifier { - if x, ok := x.GetTableIdentifier().(*TableIdentifier_PostgresTableIdentifier); ok { - return x.PostgresTableIdentifier - } - return nil -} - -type isTableIdentifier_TableIdentifier interface { - isTableIdentifier_TableIdentifier() -} - -type TableIdentifier_PostgresTableIdentifier struct { - PostgresTableIdentifier *PostgresTableIdentifier `protobuf:"bytes,1,opt,name=postgres_table_identifier,json=postgresTableIdentifier,proto3,oneof"` -} - -func (*TableIdentifier_PostgresTableIdentifier) isTableIdentifier_TableIdentifier() {} - -type EnsurePullabilityOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableIdentifier *TableIdentifier `protobuf:"bytes,1,opt,name=table_identifier,json=tableIdentifier,proto3" json:"table_identifier,omitempty"` -} - -func (x *EnsurePullabilityOutput) Reset() { - *x = EnsurePullabilityOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnsurePullabilityOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnsurePullabilityOutput) ProtoMessage() {} - -func (x *EnsurePullabilityOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnsurePullabilityOutput.ProtoReflect.Descriptor instead. -func (*EnsurePullabilityOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{21} -} - -func (x *EnsurePullabilityOutput) GetTableIdentifier() *TableIdentifier { - if x != nil { - return x.TableIdentifier - } - return nil -} - -type EnsurePullabilityBatchOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableIdentifierMapping map[string]*TableIdentifier `protobuf:"bytes,1,rep,name=table_identifier_mapping,json=tableIdentifierMapping,proto3" json:"table_identifier_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *EnsurePullabilityBatchOutput) Reset() { - *x = EnsurePullabilityBatchOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnsurePullabilityBatchOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnsurePullabilityBatchOutput) ProtoMessage() {} - -func (x *EnsurePullabilityBatchOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnsurePullabilityBatchOutput.ProtoReflect.Descriptor instead. 
-func (*EnsurePullabilityBatchOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{22} -} - -func (x *EnsurePullabilityBatchOutput) GetTableIdentifierMapping() map[string]*TableIdentifier { - if x != nil { - return x.TableIdentifierMapping - } - return nil -} - -type SetupReplicationInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - TableNameMapping map[string]string `protobuf:"bytes,3,rep,name=table_name_mapping,json=tableNameMapping,proto3" json:"table_name_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // replicate to destination using ctid - DestinationPeer *Peer `protobuf:"bytes,4,opt,name=destination_peer,json=destinationPeer,proto3" json:"destination_peer,omitempty"` - DoInitialCopy bool `protobuf:"varint,5,opt,name=do_initial_copy,json=doInitialCopy,proto3" json:"do_initial_copy,omitempty"` - ExistingPublicationName string `protobuf:"bytes,6,opt,name=existing_publication_name,json=existingPublicationName,proto3" json:"existing_publication_name,omitempty"` - ExistingReplicationSlotName string `protobuf:"bytes,7,opt,name=existing_replication_slot_name,json=existingReplicationSlotName,proto3" json:"existing_replication_slot_name,omitempty"` -} - -func (x *SetupReplicationInput) Reset() { - *x = SetupReplicationInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupReplicationInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupReplicationInput) ProtoMessage() {} - -func (x *SetupReplicationInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupReplicationInput.ProtoReflect.Descriptor instead. 
-func (*SetupReplicationInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{23} -} - -func (x *SetupReplicationInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *SetupReplicationInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *SetupReplicationInput) GetTableNameMapping() map[string]string { - if x != nil { - return x.TableNameMapping - } - return nil -} - -func (x *SetupReplicationInput) GetDestinationPeer() *Peer { - if x != nil { - return x.DestinationPeer - } - return nil -} - -func (x *SetupReplicationInput) GetDoInitialCopy() bool { - if x != nil { - return x.DoInitialCopy - } - return false -} - -func (x *SetupReplicationInput) GetExistingPublicationName() string { - if x != nil { - return x.ExistingPublicationName - } - return "" -} - -func (x *SetupReplicationInput) GetExistingReplicationSlotName() string { - if x != nil { - return x.ExistingReplicationSlotName - } - return "" -} - -type SetupReplicationOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SlotName string `protobuf:"bytes,1,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"` - SnapshotName string `protobuf:"bytes,2,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` -} - -func (x *SetupReplicationOutput) Reset() { - *x = SetupReplicationOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupReplicationOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupReplicationOutput) ProtoMessage() {} - -func (x *SetupReplicationOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupReplicationOutput.ProtoReflect.Descriptor instead. 
-func (*SetupReplicationOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{24} -} - -func (x *SetupReplicationOutput) GetSlotName() string { - if x != nil { - return x.SlotName - } - return "" -} - -func (x *SetupReplicationOutput) GetSnapshotName() string { - if x != nil { - return x.SnapshotName - } - return "" -} - -type CreateRawTableInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - TableNameMapping map[string]string `protobuf:"bytes,3,rep,name=table_name_mapping,json=tableNameMapping,proto3" json:"table_name_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - CdcSyncMode QRepSyncMode `protobuf:"varint,4,opt,name=cdc_sync_mode,json=cdcSyncMode,proto3,enum=peerdb_flow.QRepSyncMode" json:"cdc_sync_mode,omitempty"` -} - -func (x *CreateRawTableInput) Reset() { - *x = CreateRawTableInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateRawTableInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateRawTableInput) ProtoMessage() {} - -func (x *CreateRawTableInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateRawTableInput.ProtoReflect.Descriptor instead. 
-func (*CreateRawTableInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{25} -} - -func (x *CreateRawTableInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *CreateRawTableInput) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *CreateRawTableInput) GetTableNameMapping() map[string]string { - if x != nil { - return x.TableNameMapping - } - return nil -} - -func (x *CreateRawTableInput) GetCdcSyncMode() QRepSyncMode { - if x != nil { - return x.CdcSyncMode - } - return QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT -} - -type CreateRawTableOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableIdentifier string `protobuf:"bytes,1,opt,name=table_identifier,json=tableIdentifier,proto3" json:"table_identifier,omitempty"` -} - -func (x *CreateRawTableOutput) Reset() { - *x = CreateRawTableOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateRawTableOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateRawTableOutput) ProtoMessage() {} - -func (x *CreateRawTableOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateRawTableOutput.ProtoReflect.Descriptor instead. -func (*CreateRawTableOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{26} -} - -func (x *CreateRawTableOutput) GetTableIdentifier() string { - if x != nil { - return x.TableIdentifier - } - return "" -} - -type TableSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableIdentifier string `protobuf:"bytes,1,opt,name=table_identifier,json=tableIdentifier,proto3" json:"table_identifier,omitempty"` - // DEPRECATED: eliminate when breaking changes are allowed. 
- Columns map[string]string `protobuf:"bytes,2,rep,name=columns,proto3" json:"columns,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - PrimaryKeyColumns []string `protobuf:"bytes,3,rep,name=primary_key_columns,json=primaryKeyColumns,proto3" json:"primary_key_columns,omitempty"` - IsReplicaIdentityFull bool `protobuf:"varint,4,opt,name=is_replica_identity_full,json=isReplicaIdentityFull,proto3" json:"is_replica_identity_full,omitempty"` - ColumnNames []string `protobuf:"bytes,5,rep,name=column_names,json=columnNames,proto3" json:"column_names,omitempty"` - ColumnTypes []string `protobuf:"bytes,6,rep,name=column_types,json=columnTypes,proto3" json:"column_types,omitempty"` -} - -func (x *TableSchema) Reset() { - *x = TableSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableSchema) ProtoMessage() {} - -func (x *TableSchema) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableSchema.ProtoReflect.Descriptor instead. -func (*TableSchema) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{27} -} - -func (x *TableSchema) GetTableIdentifier() string { - if x != nil { - return x.TableIdentifier - } - return "" -} - -func (x *TableSchema) GetColumns() map[string]string { - if x != nil { - return x.Columns - } - return nil -} - -func (x *TableSchema) GetPrimaryKeyColumns() []string { - if x != nil { - return x.PrimaryKeyColumns - } - return nil -} - -func (x *TableSchema) GetIsReplicaIdentityFull() bool { - if x != nil { - return x.IsReplicaIdentityFull - } - return false -} - -func (x *TableSchema) GetColumnNames() []string { - if x != nil { - return x.ColumnNames - } - return nil -} - -func (x *TableSchema) GetColumnTypes() []string { - if x != nil { - return x.ColumnTypes - } - return nil -} - -type GetTableSchemaBatchInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - TableIdentifiers []string `protobuf:"bytes,2,rep,name=table_identifiers,json=tableIdentifiers,proto3" json:"table_identifiers,omitempty"` - FlowName string `protobuf:"bytes,3,opt,name=flow_name,json=flowName,proto3" json:"flow_name,omitempty"` - SkipPkeyAndReplicaCheck bool `protobuf:"varint,4,opt,name=skip_pkey_and_replica_check,json=skipPkeyAndReplicaCheck,proto3" json:"skip_pkey_and_replica_check,omitempty"` -} - -func (x *GetTableSchemaBatchInput) Reset() { - *x = GetTableSchemaBatchInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTableSchemaBatchInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTableSchemaBatchInput) ProtoMessage() {} - -func (x *GetTableSchemaBatchInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTableSchemaBatchInput.ProtoReflect.Descriptor instead. -func (*GetTableSchemaBatchInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{28} -} - -func (x *GetTableSchemaBatchInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *GetTableSchemaBatchInput) GetTableIdentifiers() []string { - if x != nil { - return x.TableIdentifiers - } - return nil -} - -func (x *GetTableSchemaBatchInput) GetFlowName() string { - if x != nil { - return x.FlowName - } - return "" -} - -func (x *GetTableSchemaBatchInput) GetSkipPkeyAndReplicaCheck() bool { - if x != nil { - return x.SkipPkeyAndReplicaCheck - } - return false -} - -type GetTableSchemaBatchOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableNameSchemaMapping map[string]*TableSchema `protobuf:"bytes,1,rep,name=table_name_schema_mapping,json=tableNameSchemaMapping,proto3" json:"table_name_schema_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *GetTableSchemaBatchOutput) Reset() { - *x = GetTableSchemaBatchOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTableSchemaBatchOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTableSchemaBatchOutput) ProtoMessage() {} - -func (x *GetTableSchemaBatchOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTableSchemaBatchOutput.ProtoReflect.Descriptor instead. 
-func (*GetTableSchemaBatchOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{29} -} - -func (x *GetTableSchemaBatchOutput) GetTableNameSchemaMapping() map[string]*TableSchema { - if x != nil { - return x.TableNameSchemaMapping - } - return nil -} - -type SetupNormalizedTableInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - TableIdentifier string `protobuf:"bytes,2,opt,name=table_identifier,json=tableIdentifier,proto3" json:"table_identifier,omitempty"` - SourceTableSchema *TableSchema `protobuf:"bytes,3,opt,name=source_table_schema,json=sourceTableSchema,proto3" json:"source_table_schema,omitempty"` -} - -func (x *SetupNormalizedTableInput) Reset() { - *x = SetupNormalizedTableInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupNormalizedTableInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupNormalizedTableInput) ProtoMessage() {} - -func (x *SetupNormalizedTableInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupNormalizedTableInput.ProtoReflect.Descriptor instead. -func (*SetupNormalizedTableInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{30} -} - -func (x *SetupNormalizedTableInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *SetupNormalizedTableInput) GetTableIdentifier() string { - if x != nil { - return x.TableIdentifier - } - return "" -} - -func (x *SetupNormalizedTableInput) GetSourceTableSchema() *TableSchema { - if x != nil { - return x.SourceTableSchema - } - return nil -} - -type SetupNormalizedTableBatchInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerConnectionConfig *Peer `protobuf:"bytes,1,opt,name=peer_connection_config,json=peerConnectionConfig,proto3" json:"peer_connection_config,omitempty"` - TableNameSchemaMapping map[string]*TableSchema `protobuf:"bytes,2,rep,name=table_name_schema_mapping,json=tableNameSchemaMapping,proto3" json:"table_name_schema_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // migration related columns - SoftDeleteColName string `protobuf:"bytes,4,opt,name=soft_delete_col_name,json=softDeleteColName,proto3" json:"soft_delete_col_name,omitempty"` - SyncedAtColName string `protobuf:"bytes,5,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` - FlowName string `protobuf:"bytes,6,opt,name=flow_name,json=flowName,proto3" json:"flow_name,omitempty"` -} - -func (x *SetupNormalizedTableBatchInput) Reset() { - *x = SetupNormalizedTableBatchInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupNormalizedTableBatchInput) String() string { - return 
protoimpl.X.MessageStringOf(x) -} - -func (*SetupNormalizedTableBatchInput) ProtoMessage() {} - -func (x *SetupNormalizedTableBatchInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupNormalizedTableBatchInput.ProtoReflect.Descriptor instead. -func (*SetupNormalizedTableBatchInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{31} -} - -func (x *SetupNormalizedTableBatchInput) GetPeerConnectionConfig() *Peer { - if x != nil { - return x.PeerConnectionConfig - } - return nil -} - -func (x *SetupNormalizedTableBatchInput) GetTableNameSchemaMapping() map[string]*TableSchema { - if x != nil { - return x.TableNameSchemaMapping - } - return nil -} - -func (x *SetupNormalizedTableBatchInput) GetSoftDeleteColName() string { - if x != nil { - return x.SoftDeleteColName - } - return "" -} - -func (x *SetupNormalizedTableBatchInput) GetSyncedAtColName() string { - if x != nil { - return x.SyncedAtColName - } - return "" -} - -func (x *SetupNormalizedTableBatchInput) GetFlowName() string { - if x != nil { - return x.FlowName - } - return "" -} - -type SetupNormalizedTableOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableIdentifier string `protobuf:"bytes,1,opt,name=table_identifier,json=tableIdentifier,proto3" json:"table_identifier,omitempty"` - AlreadyExists bool `protobuf:"varint,2,opt,name=already_exists,json=alreadyExists,proto3" json:"already_exists,omitempty"` -} - -func (x *SetupNormalizedTableOutput) Reset() { - *x = SetupNormalizedTableOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupNormalizedTableOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupNormalizedTableOutput) ProtoMessage() {} - -func (x *SetupNormalizedTableOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupNormalizedTableOutput.ProtoReflect.Descriptor instead. 
-func (*SetupNormalizedTableOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{32} -} - -func (x *SetupNormalizedTableOutput) GetTableIdentifier() string { - if x != nil { - return x.TableIdentifier - } - return "" -} - -func (x *SetupNormalizedTableOutput) GetAlreadyExists() bool { - if x != nil { - return x.AlreadyExists - } - return false -} - -type SetupNormalizedTableBatchOutput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TableExistsMapping map[string]bool `protobuf:"bytes,1,rep,name=table_exists_mapping,json=tableExistsMapping,proto3" json:"table_exists_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` -} - -func (x *SetupNormalizedTableBatchOutput) Reset() { - *x = SetupNormalizedTableBatchOutput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetupNormalizedTableBatchOutput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetupNormalizedTableBatchOutput) ProtoMessage() {} - -func (x *SetupNormalizedTableBatchOutput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetupNormalizedTableBatchOutput.ProtoReflect.Descriptor instead. -func (*SetupNormalizedTableBatchOutput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{33} -} - -func (x *SetupNormalizedTableBatchOutput) GetTableExistsMapping() map[string]bool { - if x != nil { - return x.TableExistsMapping - } - return nil -} - -// partition ranges [start, end] inclusive -type IntPartitionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *IntPartitionRange) Reset() { - *x = IntPartitionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IntPartitionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IntPartitionRange) ProtoMessage() {} - -func (x *IntPartitionRange) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IntPartitionRange.ProtoReflect.Descriptor instead. 
-func (*IntPartitionRange) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{34} -} - -func (x *IntPartitionRange) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *IntPartitionRange) GetEnd() int64 { - if x != nil { - return x.End - } - return 0 -} - -type TimestampPartitionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *TimestampPartitionRange) Reset() { - *x = TimestampPartitionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimestampPartitionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimestampPartitionRange) ProtoMessage() {} - -func (x *TimestampPartitionRange) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimestampPartitionRange.ProtoReflect.Descriptor instead. -func (*TimestampPartitionRange) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{35} -} - -func (x *TimestampPartitionRange) GetStart() *timestamppb.Timestamp { - if x != nil { - return x.Start - } - return nil -} - -func (x *TimestampPartitionRange) GetEnd() *timestamppb.Timestamp { - if x != nil { - return x.End - } - return nil -} - -type TID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BlockNumber uint32 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` - OffsetNumber uint32 `protobuf:"varint,2,opt,name=offset_number,json=offsetNumber,proto3" json:"offset_number,omitempty"` -} - -func (x *TID) Reset() { - *x = TID{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TID) ProtoMessage() {} - -func (x *TID) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TID.ProtoReflect.Descriptor instead. 
-func (*TID) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{36} -} - -func (x *TID) GetBlockNumber() uint32 { - if x != nil { - return x.BlockNumber - } - return 0 -} - -func (x *TID) GetOffsetNumber() uint32 { - if x != nil { - return x.OffsetNumber - } - return 0 -} - -type TIDPartitionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *TID `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End *TID `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *TIDPartitionRange) Reset() { - *x = TIDPartitionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TIDPartitionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TIDPartitionRange) ProtoMessage() {} - -func (x *TIDPartitionRange) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TIDPartitionRange.ProtoReflect.Descriptor instead. -func (*TIDPartitionRange) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{37} -} - -func (x *TIDPartitionRange) GetStart() *TID { - if x != nil { - return x.Start - } - return nil -} - -func (x *TIDPartitionRange) GetEnd() *TID { - if x != nil { - return x.End - } - return nil -} - -type PartitionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // can be a timestamp range or an integer range - // - // Types that are assignable to Range: - // - // *PartitionRange_IntRange - // *PartitionRange_TimestampRange - // *PartitionRange_TidRange - Range isPartitionRange_Range `protobuf_oneof:"range"` -} - -func (x *PartitionRange) Reset() { - *x = PartitionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PartitionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PartitionRange) ProtoMessage() {} - -func (x *PartitionRange) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PartitionRange.ProtoReflect.Descriptor instead. 
-func (*PartitionRange) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{38} -} - -func (m *PartitionRange) GetRange() isPartitionRange_Range { - if m != nil { - return m.Range - } - return nil -} - -func (x *PartitionRange) GetIntRange() *IntPartitionRange { - if x, ok := x.GetRange().(*PartitionRange_IntRange); ok { - return x.IntRange - } - return nil -} - -func (x *PartitionRange) GetTimestampRange() *TimestampPartitionRange { - if x, ok := x.GetRange().(*PartitionRange_TimestampRange); ok { - return x.TimestampRange - } - return nil -} - -func (x *PartitionRange) GetTidRange() *TIDPartitionRange { - if x, ok := x.GetRange().(*PartitionRange_TidRange); ok { - return x.TidRange - } - return nil -} - -type isPartitionRange_Range interface { - isPartitionRange_Range() -} - -type PartitionRange_IntRange struct { - IntRange *IntPartitionRange `protobuf:"bytes,1,opt,name=int_range,json=intRange,proto3,oneof"` -} - -type PartitionRange_TimestampRange struct { - TimestampRange *TimestampPartitionRange `protobuf:"bytes,2,opt,name=timestamp_range,json=timestampRange,proto3,oneof"` -} - -type PartitionRange_TidRange struct { - TidRange *TIDPartitionRange `protobuf:"bytes,3,opt,name=tid_range,json=tidRange,proto3,oneof"` -} - -func (*PartitionRange_IntRange) isPartitionRange_Range() {} - -func (*PartitionRange_TimestampRange) isPartitionRange_Range() {} - -func (*PartitionRange_TidRange) isPartitionRange_Range() {} - -type QRepWriteMode struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WriteType QRepWriteType `protobuf:"varint,1,opt,name=write_type,json=writeType,proto3,enum=peerdb_flow.QRepWriteType" json:"write_type,omitempty"` - UpsertKeyColumns []string `protobuf:"bytes,2,rep,name=upsert_key_columns,json=upsertKeyColumns,proto3" json:"upsert_key_columns,omitempty"` -} - -func (x *QRepWriteMode) Reset() { - *x = QRepWriteMode{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepWriteMode) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepWriteMode) ProtoMessage() {} - -func (x *QRepWriteMode) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepWriteMode.ProtoReflect.Descriptor instead. 
-func (*QRepWriteMode) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{39} -} - -func (x *QRepWriteMode) GetWriteType() QRepWriteType { - if x != nil { - return x.WriteType - } - return QRepWriteType_QREP_WRITE_MODE_APPEND -} - -func (x *QRepWriteMode) GetUpsertKeyColumns() []string { - if x != nil { - return x.UpsertKeyColumns - } - return nil -} - -type QRepConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - SourcePeer *Peer `protobuf:"bytes,2,opt,name=source_peer,json=sourcePeer,proto3" json:"source_peer,omitempty"` - DestinationPeer *Peer `protobuf:"bytes,3,opt,name=destination_peer,json=destinationPeer,proto3" json:"destination_peer,omitempty"` - DestinationTableIdentifier string `protobuf:"bytes,4,opt,name=destination_table_identifier,json=destinationTableIdentifier,proto3" json:"destination_table_identifier,omitempty"` - Query string `protobuf:"bytes,5,opt,name=query,proto3" json:"query,omitempty"` - WatermarkTable string `protobuf:"bytes,6,opt,name=watermark_table,json=watermarkTable,proto3" json:"watermark_table,omitempty"` - WatermarkColumn string `protobuf:"bytes,7,opt,name=watermark_column,json=watermarkColumn,proto3" json:"watermark_column,omitempty"` - InitialCopyOnly bool `protobuf:"varint,8,opt,name=initial_copy_only,json=initialCopyOnly,proto3" json:"initial_copy_only,omitempty"` - SyncMode QRepSyncMode `protobuf:"varint,9,opt,name=sync_mode,json=syncMode,proto3,enum=peerdb_flow.QRepSyncMode" json:"sync_mode,omitempty"` - // DEPRECATED: eliminate when breaking changes are allowed. - BatchSizeInt uint32 `protobuf:"varint,10,opt,name=batch_size_int,json=batchSizeInt,proto3" json:"batch_size_int,omitempty"` - // DEPRECATED: eliminate when breaking changes are allowed. - BatchDurationSeconds uint32 `protobuf:"varint,11,opt,name=batch_duration_seconds,json=batchDurationSeconds,proto3" json:"batch_duration_seconds,omitempty"` - MaxParallelWorkers uint32 `protobuf:"varint,12,opt,name=max_parallel_workers,json=maxParallelWorkers,proto3" json:"max_parallel_workers,omitempty"` - // time to wait between getting partitions to process - WaitBetweenBatchesSeconds uint32 `protobuf:"varint,13,opt,name=wait_between_batches_seconds,json=waitBetweenBatchesSeconds,proto3" json:"wait_between_batches_seconds,omitempty"` - WriteMode *QRepWriteMode `protobuf:"bytes,14,opt,name=write_mode,json=writeMode,proto3" json:"write_mode,omitempty"` - // This is only used when sync_mode is AVRO - // this is the location where the avro files will be written - // if this starts with gs:// then it will be written to GCS - // if this starts with s3:// then it will be written to S3, only supported in Snowflake - // if nothing is specified then it will be written to local disk - // if using GCS or S3 make sure your instance has the correct permissions. - StagingPath string `protobuf:"bytes,15,opt,name=staging_path,json=stagingPath,proto3" json:"staging_path,omitempty"` - // This setting overrides batch_size_int and batch_duration_seconds - // and instead uses the number of rows per partition to determine - // how many rows to process per batch. - NumRowsPerPartition uint32 `protobuf:"varint,16,opt,name=num_rows_per_partition,json=numRowsPerPartition,proto3" json:"num_rows_per_partition,omitempty"` - // Creates the watermark table on the destination as-is, can be used for some queries. 
- SetupWatermarkTableOnDestination bool `protobuf:"varint,17,opt,name=setup_watermark_table_on_destination,json=setupWatermarkTableOnDestination,proto3" json:"setup_watermark_table_on_destination,omitempty"` - // create new tables with "_peerdb_resync" suffix, perform initial load and then swap the new table with the old ones - // to be used after the old mirror is dropped - DstTableFullResync bool `protobuf:"varint,18,opt,name=dst_table_full_resync,json=dstTableFullResync,proto3" json:"dst_table_full_resync,omitempty"` - SyncedAtColName string `protobuf:"bytes,19,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` - SoftDeleteColName string `protobuf:"bytes,20,opt,name=soft_delete_col_name,json=softDeleteColName,proto3" json:"soft_delete_col_name,omitempty"` -} - -func (x *QRepConfig) Reset() { - *x = QRepConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepConfig) ProtoMessage() {} - -func (x *QRepConfig) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepConfig.ProtoReflect.Descriptor instead. -func (*QRepConfig) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{40} -} - -func (x *QRepConfig) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *QRepConfig) GetSourcePeer() *Peer { - if x != nil { - return x.SourcePeer - } - return nil -} - -func (x *QRepConfig) GetDestinationPeer() *Peer { - if x != nil { - return x.DestinationPeer - } - return nil -} - -func (x *QRepConfig) GetDestinationTableIdentifier() string { - if x != nil { - return x.DestinationTableIdentifier - } - return "" -} - -func (x *QRepConfig) GetQuery() string { - if x != nil { - return x.Query - } - return "" -} - -func (x *QRepConfig) GetWatermarkTable() string { - if x != nil { - return x.WatermarkTable - } - return "" -} - -func (x *QRepConfig) GetWatermarkColumn() string { - if x != nil { - return x.WatermarkColumn - } - return "" -} - -func (x *QRepConfig) GetInitialCopyOnly() bool { - if x != nil { - return x.InitialCopyOnly - } - return false -} - -func (x *QRepConfig) GetSyncMode() QRepSyncMode { - if x != nil { - return x.SyncMode - } - return QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT -} - -func (x *QRepConfig) GetBatchSizeInt() uint32 { - if x != nil { - return x.BatchSizeInt - } - return 0 -} - -func (x *QRepConfig) GetBatchDurationSeconds() uint32 { - if x != nil { - return x.BatchDurationSeconds - } - return 0 -} - -func (x *QRepConfig) GetMaxParallelWorkers() uint32 { - if x != nil { - return x.MaxParallelWorkers - } - return 0 -} - -func (x *QRepConfig) GetWaitBetweenBatchesSeconds() uint32 { - if x != nil { - return x.WaitBetweenBatchesSeconds - } - return 0 -} - -func (x *QRepConfig) GetWriteMode() *QRepWriteMode { - if x != nil { - return x.WriteMode - } - return nil -} - -func (x *QRepConfig) GetStagingPath() string { - if x != nil { - return x.StagingPath - } - return "" -} - -func (x *QRepConfig) GetNumRowsPerPartition() uint32 { - if x != nil { - return x.NumRowsPerPartition - } - return 0 -} - -func (x *QRepConfig) 
GetSetupWatermarkTableOnDestination() bool { - if x != nil { - return x.SetupWatermarkTableOnDestination - } - return false -} - -func (x *QRepConfig) GetDstTableFullResync() bool { - if x != nil { - return x.DstTableFullResync - } - return false -} - -func (x *QRepConfig) GetSyncedAtColName() string { - if x != nil { - return x.SyncedAtColName - } - return "" -} - -func (x *QRepConfig) GetSoftDeleteColName() string { - if x != nil { - return x.SoftDeleteColName - } - return "" -} - -type QRepPartition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PartitionId string `protobuf:"bytes,2,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` - Range *PartitionRange `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - FullTablePartition bool `protobuf:"varint,4,opt,name=full_table_partition,json=fullTablePartition,proto3" json:"full_table_partition,omitempty"` -} - -func (x *QRepPartition) Reset() { - *x = QRepPartition{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepPartition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepPartition) ProtoMessage() {} - -func (x *QRepPartition) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepPartition.ProtoReflect.Descriptor instead. -func (*QRepPartition) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{41} -} - -func (x *QRepPartition) GetPartitionId() string { - if x != nil { - return x.PartitionId - } - return "" -} - -func (x *QRepPartition) GetRange() *PartitionRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *QRepPartition) GetFullTablePartition() bool { - if x != nil { - return x.FullTablePartition - } - return false -} - -type QRepPartitionBatch struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BatchId int32 `protobuf:"varint,1,opt,name=batch_id,json=batchId,proto3" json:"batch_id,omitempty"` - Partitions []*QRepPartition `protobuf:"bytes,2,rep,name=partitions,proto3" json:"partitions,omitempty"` -} - -func (x *QRepPartitionBatch) Reset() { - *x = QRepPartitionBatch{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepPartitionBatch) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepPartitionBatch) ProtoMessage() {} - -func (x *QRepPartitionBatch) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepPartitionBatch.ProtoReflect.Descriptor instead. 
-func (*QRepPartitionBatch) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{42} -} - -func (x *QRepPartitionBatch) GetBatchId() int32 { - if x != nil { - return x.BatchId - } - return 0 -} - -func (x *QRepPartitionBatch) GetPartitions() []*QRepPartition { - if x != nil { - return x.Partitions - } - return nil -} - -type QRepParitionResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Partitions []*QRepPartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"` -} - -func (x *QRepParitionResult) Reset() { - *x = QRepParitionResult{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepParitionResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepParitionResult) ProtoMessage() {} - -func (x *QRepParitionResult) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepParitionResult.ProtoReflect.Descriptor instead. -func (*QRepParitionResult) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{43} -} - -func (x *QRepParitionResult) GetPartitions() []*QRepPartition { - if x != nil { - return x.Partitions - } - return nil -} - -type DropFlowInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowName string `protobuf:"bytes,1,opt,name=flow_name,json=flowName,proto3" json:"flow_name,omitempty"` -} - -func (x *DropFlowInput) Reset() { - *x = DropFlowInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropFlowInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropFlowInput) ProtoMessage() {} - -func (x *DropFlowInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropFlowInput.ProtoReflect.Descriptor instead. 
-func (*DropFlowInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{44} -} - -func (x *DropFlowInput) GetFlowName() string { - if x != nil { - return x.FlowName - } - return "" -} - -type DeltaAddedColumn struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ColumnName string `protobuf:"bytes,1,opt,name=column_name,json=columnName,proto3" json:"column_name,omitempty"` - ColumnType string `protobuf:"bytes,2,opt,name=column_type,json=columnType,proto3" json:"column_type,omitempty"` -} - -func (x *DeltaAddedColumn) Reset() { - *x = DeltaAddedColumn{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeltaAddedColumn) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeltaAddedColumn) ProtoMessage() {} - -func (x *DeltaAddedColumn) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeltaAddedColumn.ProtoReflect.Descriptor instead. -func (*DeltaAddedColumn) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{45} -} - -func (x *DeltaAddedColumn) GetColumnName() string { - if x != nil { - return x.ColumnName - } - return "" -} - -func (x *DeltaAddedColumn) GetColumnType() string { - if x != nil { - return x.ColumnType - } - return "" -} - -type TableSchemaDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SrcTableName string `protobuf:"bytes,1,opt,name=src_table_name,json=srcTableName,proto3" json:"src_table_name,omitempty"` - DstTableName string `protobuf:"bytes,2,opt,name=dst_table_name,json=dstTableName,proto3" json:"dst_table_name,omitempty"` - AddedColumns []*DeltaAddedColumn `protobuf:"bytes,3,rep,name=added_columns,json=addedColumns,proto3" json:"added_columns,omitempty"` -} - -func (x *TableSchemaDelta) Reset() { - *x = TableSchemaDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableSchemaDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableSchemaDelta) ProtoMessage() {} - -func (x *TableSchemaDelta) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableSchemaDelta.ProtoReflect.Descriptor instead. 
-func (*TableSchemaDelta) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{46} -} - -func (x *TableSchemaDelta) GetSrcTableName() string { - if x != nil { - return x.SrcTableName - } - return "" -} - -func (x *TableSchemaDelta) GetDstTableName() string { - if x != nil { - return x.DstTableName - } - return "" -} - -func (x *TableSchemaDelta) GetAddedColumns() []*DeltaAddedColumn { - if x != nil { - return x.AddedColumns - } - return nil -} - -type ReplayTableSchemaDeltaInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowConnectionConfigs *FlowConnectionConfigs `protobuf:"bytes,1,opt,name=flow_connection_configs,json=flowConnectionConfigs,proto3" json:"flow_connection_configs,omitempty"` - TableSchemaDeltas []*TableSchemaDelta `protobuf:"bytes,2,rep,name=table_schema_deltas,json=tableSchemaDeltas,proto3" json:"table_schema_deltas,omitempty"` -} - -func (x *ReplayTableSchemaDeltaInput) Reset() { - *x = ReplayTableSchemaDeltaInput{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayTableSchemaDeltaInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayTableSchemaDeltaInput) ProtoMessage() {} - -func (x *ReplayTableSchemaDeltaInput) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayTableSchemaDeltaInput.ProtoReflect.Descriptor instead. -func (*ReplayTableSchemaDeltaInput) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{47} -} - -func (x *ReplayTableSchemaDeltaInput) GetFlowConnectionConfigs() *FlowConnectionConfigs { - if x != nil { - return x.FlowConnectionConfigs - } - return nil -} - -func (x *ReplayTableSchemaDeltaInput) GetTableSchemaDeltas() []*TableSchemaDelta { - if x != nil { - return x.TableSchemaDeltas - } - return nil -} - -type QRepFlowState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LastPartition *QRepPartition `protobuf:"bytes,1,opt,name=last_partition,json=lastPartition,proto3" json:"last_partition,omitempty"` - NumPartitionsProcessed uint64 `protobuf:"varint,2,opt,name=num_partitions_processed,json=numPartitionsProcessed,proto3" json:"num_partitions_processed,omitempty"` - NeedsResync bool `protobuf:"varint,3,opt,name=needs_resync,json=needsResync,proto3" json:"needs_resync,omitempty"` - DisableWaitForNewRows bool `protobuf:"varint,4,opt,name=disable_wait_for_new_rows,json=disableWaitForNewRows,proto3" json:"disable_wait_for_new_rows,omitempty"` -} - -func (x *QRepFlowState) Reset() { - *x = QRepFlowState{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepFlowState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepFlowState) ProtoMessage() {} - -func (x *QRepFlowState) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms 
- } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepFlowState.ProtoReflect.Descriptor instead. -func (*QRepFlowState) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{48} -} - -func (x *QRepFlowState) GetLastPartition() *QRepPartition { - if x != nil { - return x.LastPartition - } - return nil -} - -func (x *QRepFlowState) GetNumPartitionsProcessed() uint64 { - if x != nil { - return x.NumPartitionsProcessed - } - return 0 -} - -func (x *QRepFlowState) GetNeedsResync() bool { - if x != nil { - return x.NeedsResync - } - return false -} - -func (x *QRepFlowState) GetDisableWaitForNewRows() bool { - if x != nil { - return x.DisableWaitForNewRows - } - return false -} - -type PeerDBColumns struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SoftDeleteColName string `protobuf:"bytes,1,opt,name=soft_delete_col_name,json=softDeleteColName,proto3" json:"soft_delete_col_name,omitempty"` - SyncedAtColName string `protobuf:"bytes,2,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` - SoftDelete bool `protobuf:"varint,3,opt,name=soft_delete,json=softDelete,proto3" json:"soft_delete,omitempty"` -} - -func (x *PeerDBColumns) Reset() { - *x = PeerDBColumns{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerDBColumns) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerDBColumns) ProtoMessage() {} - -func (x *PeerDBColumns) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerDBColumns.ProtoReflect.Descriptor instead. 
-func (*PeerDBColumns) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{49} -} - -func (x *PeerDBColumns) GetSoftDeleteColName() string { - if x != nil { - return x.SoftDeleteColName - } - return "" -} - -func (x *PeerDBColumns) GetSyncedAtColName() string { - if x != nil { - return x.SyncedAtColName - } - return "" -} - -func (x *PeerDBColumns) GetSoftDelete() bool { - if x != nil { - return x.SoftDelete - } - return false -} - -type GetOpenConnectionsForUserResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` - CurrentOpenConnections int64 `protobuf:"varint,2,opt,name=current_open_connections,json=currentOpenConnections,proto3" json:"current_open_connections,omitempty"` -} - -func (x *GetOpenConnectionsForUserResult) Reset() { - *x = GetOpenConnectionsForUserResult{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpenConnectionsForUserResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpenConnectionsForUserResult) ProtoMessage() {} - -func (x *GetOpenConnectionsForUserResult) ProtoReflect() protoreflect.Message { - mi := &file_flow_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpenConnectionsForUserResult.ProtoReflect.Descriptor instead. -func (*GetOpenConnectionsForUserResult) Descriptor() ([]byte, []int) { - return file_flow_proto_rawDescGZIP(), []int{50} -} - -func (x *GetOpenConnectionsForUserResult) GetUserName() string { - if x != nil { - return x.UserName - } - return "" -} - -func (x *GetOpenConnectionsForUserResult) GetCurrentOpenConnections() int64 { - if x != nil { - return x.CurrentOpenConnections - } - return 0 -} - -var File_flow_proto protoreflect.FileDescriptor - -var file_flow_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x70, 0x65, 0x65, 0x72, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x74, 0x0a, 0x10, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5e, 0x0a, - 0x15, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x14, 0x0a, 0x05, 
0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x22, 0x95, 0x01, - 0x0a, 0x0f, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0xc7, 0x01, 0x0a, 0x0c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x40, - 0x0a, 0x1c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, - 0x51, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x26, 0x0a, - 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, - 0x04, 0x70, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, - 0x6d, 0x65, 0x22, 0xf0, 0x0c, 0x0a, 0x15, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x64, 0x65, 
[... raw wire-format descriptor bytes of file_flow_proto_rawDesc (the serialized flow.proto descriptor) elided from this deleted generated file ...]
-}
-
-var (
-	file_flow_proto_rawDescOnce sync.Once
-	file_flow_proto_rawDescData = file_flow_proto_rawDesc
-)
-
-func file_flow_proto_rawDescGZIP() []byte {
-	file_flow_proto_rawDescOnce.Do(func() {
-		file_flow_proto_rawDescData = protoimpl.X.CompressGZIP(file_flow_proto_rawDescData)
-	})
return file_flow_proto_rawDescData -} - -var file_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 63) -var file_flow_proto_goTypes = []interface{}{ - (QRepSyncMode)(0), // 0: peerdb_flow.QRepSyncMode - (QRepWriteType)(0), // 1: peerdb_flow.QRepWriteType - (*TableNameMapping)(nil), // 2: peerdb_flow.TableNameMapping - (*RelationMessageColumn)(nil), // 3: peerdb_flow.RelationMessageColumn - (*RelationMessage)(nil), // 4: peerdb_flow.RelationMessage - (*TableMapping)(nil), // 5: peerdb_flow.TableMapping - (*SetupInput)(nil), // 6: peerdb_flow.SetupInput - (*FlowConnectionConfigs)(nil), // 7: peerdb_flow.FlowConnectionConfigs - (*RenameTableOption)(nil), // 8: peerdb_flow.RenameTableOption - (*RenameTablesInput)(nil), // 9: peerdb_flow.RenameTablesInput - (*RenameTablesOutput)(nil), // 10: peerdb_flow.RenameTablesOutput - (*CreateTablesFromExistingInput)(nil), // 11: peerdb_flow.CreateTablesFromExistingInput - (*CreateTablesFromExistingOutput)(nil), // 12: peerdb_flow.CreateTablesFromExistingOutput - (*SyncFlowOptions)(nil), // 13: peerdb_flow.SyncFlowOptions - (*NormalizeFlowOptions)(nil), // 14: peerdb_flow.NormalizeFlowOptions - (*LastSyncState)(nil), // 15: peerdb_flow.LastSyncState - (*StartFlowInput)(nil), // 16: peerdb_flow.StartFlowInput - (*StartNormalizeInput)(nil), // 17: peerdb_flow.StartNormalizeInput - (*GetLastSyncedIDInput)(nil), // 18: peerdb_flow.GetLastSyncedIDInput - (*EnsurePullabilityInput)(nil), // 19: peerdb_flow.EnsurePullabilityInput - (*EnsurePullabilityBatchInput)(nil), // 20: peerdb_flow.EnsurePullabilityBatchInput - (*PostgresTableIdentifier)(nil), // 21: peerdb_flow.PostgresTableIdentifier - (*TableIdentifier)(nil), // 22: peerdb_flow.TableIdentifier - (*EnsurePullabilityOutput)(nil), // 23: peerdb_flow.EnsurePullabilityOutput - (*EnsurePullabilityBatchOutput)(nil), // 24: peerdb_flow.EnsurePullabilityBatchOutput - (*SetupReplicationInput)(nil), // 25: peerdb_flow.SetupReplicationInput - (*SetupReplicationOutput)(nil), // 26: peerdb_flow.SetupReplicationOutput - (*CreateRawTableInput)(nil), // 27: peerdb_flow.CreateRawTableInput - (*CreateRawTableOutput)(nil), // 28: peerdb_flow.CreateRawTableOutput - (*TableSchema)(nil), // 29: peerdb_flow.TableSchema - (*GetTableSchemaBatchInput)(nil), // 30: peerdb_flow.GetTableSchemaBatchInput - (*GetTableSchemaBatchOutput)(nil), // 31: peerdb_flow.GetTableSchemaBatchOutput - (*SetupNormalizedTableInput)(nil), // 32: peerdb_flow.SetupNormalizedTableInput - (*SetupNormalizedTableBatchInput)(nil), // 33: peerdb_flow.SetupNormalizedTableBatchInput - (*SetupNormalizedTableOutput)(nil), // 34: peerdb_flow.SetupNormalizedTableOutput - (*SetupNormalizedTableBatchOutput)(nil), // 35: peerdb_flow.SetupNormalizedTableBatchOutput - (*IntPartitionRange)(nil), // 36: peerdb_flow.IntPartitionRange - (*TimestampPartitionRange)(nil), // 37: peerdb_flow.TimestampPartitionRange - (*TID)(nil), // 38: peerdb_flow.TID - (*TIDPartitionRange)(nil), // 39: peerdb_flow.TIDPartitionRange - (*PartitionRange)(nil), // 40: peerdb_flow.PartitionRange - (*QRepWriteMode)(nil), // 41: peerdb_flow.QRepWriteMode - (*QRepConfig)(nil), // 42: peerdb_flow.QRepConfig - (*QRepPartition)(nil), // 43: peerdb_flow.QRepPartition - (*QRepPartitionBatch)(nil), // 44: peerdb_flow.QRepPartitionBatch - (*QRepParitionResult)(nil), // 45: peerdb_flow.QRepParitionResult - (*DropFlowInput)(nil), // 46: peerdb_flow.DropFlowInput - (*DeltaAddedColumn)(nil), // 47: peerdb_flow.DeltaAddedColumn - 
(*TableSchemaDelta)(nil), // 48: peerdb_flow.TableSchemaDelta - (*ReplayTableSchemaDeltaInput)(nil), // 49: peerdb_flow.ReplayTableSchemaDeltaInput - (*QRepFlowState)(nil), // 50: peerdb_flow.QRepFlowState - (*PeerDBColumns)(nil), // 51: peerdb_flow.PeerDBColumns - (*GetOpenConnectionsForUserResult)(nil), // 52: peerdb_flow.GetOpenConnectionsForUserResult - nil, // 53: peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry - nil, // 54: peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry - nil, // 55: peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry - nil, // 56: peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry - nil, // 57: peerdb_flow.StartFlowInput.RelationMessageMappingEntry - nil, // 58: peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry - nil, // 59: peerdb_flow.SetupReplicationInput.TableNameMappingEntry - nil, // 60: peerdb_flow.CreateRawTableInput.TableNameMappingEntry - nil, // 61: peerdb_flow.TableSchema.ColumnsEntry - nil, // 62: peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry - nil, // 63: peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry - nil, // 64: peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry - (*Peer)(nil), // 65: peerdb_peers.Peer - (*timestamppb.Timestamp)(nil), // 66: google.protobuf.Timestamp -} -var file_flow_proto_depIdxs = []int32{ - 3, // 0: peerdb_flow.RelationMessage.columns:type_name -> peerdb_flow.RelationMessageColumn - 65, // 1: peerdb_flow.SetupInput.peer:type_name -> peerdb_peers.Peer - 65, // 2: peerdb_flow.FlowConnectionConfigs.source:type_name -> peerdb_peers.Peer - 65, // 3: peerdb_flow.FlowConnectionConfigs.destination:type_name -> peerdb_peers.Peer - 29, // 4: peerdb_flow.FlowConnectionConfigs.table_schema:type_name -> peerdb_flow.TableSchema - 5, // 5: peerdb_flow.FlowConnectionConfigs.table_mappings:type_name -> peerdb_flow.TableMapping - 53, // 6: peerdb_flow.FlowConnectionConfigs.src_table_id_name_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry - 54, // 7: peerdb_flow.FlowConnectionConfigs.table_name_schema_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry - 65, // 8: peerdb_flow.FlowConnectionConfigs.metadata_peer:type_name -> peerdb_peers.Peer - 0, // 9: peerdb_flow.FlowConnectionConfigs.snapshot_sync_mode:type_name -> peerdb_flow.QRepSyncMode - 0, // 10: peerdb_flow.FlowConnectionConfigs.cdc_sync_mode:type_name -> peerdb_flow.QRepSyncMode - 29, // 11: peerdb_flow.RenameTableOption.table_schema:type_name -> peerdb_flow.TableSchema - 65, // 12: peerdb_flow.RenameTablesInput.peer:type_name -> peerdb_peers.Peer - 8, // 13: peerdb_flow.RenameTablesInput.rename_table_options:type_name -> peerdb_flow.RenameTableOption - 65, // 14: peerdb_flow.CreateTablesFromExistingInput.peer:type_name -> peerdb_peers.Peer - 55, // 15: peerdb_flow.CreateTablesFromExistingInput.new_to_existing_table_mapping:type_name -> peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry - 56, // 16: peerdb_flow.SyncFlowOptions.relation_message_mapping:type_name -> peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry - 66, // 17: peerdb_flow.LastSyncState.last_synced_at:type_name -> google.protobuf.Timestamp - 15, // 18: peerdb_flow.StartFlowInput.last_sync_state:type_name -> peerdb_flow.LastSyncState - 7, // 19: peerdb_flow.StartFlowInput.flow_connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs - 13, // 20: 
peerdb_flow.StartFlowInput.sync_flow_options:type_name -> peerdb_flow.SyncFlowOptions - 57, // 21: peerdb_flow.StartFlowInput.relation_message_mapping:type_name -> peerdb_flow.StartFlowInput.RelationMessageMappingEntry - 7, // 22: peerdb_flow.StartNormalizeInput.flow_connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs - 65, // 23: peerdb_flow.GetLastSyncedIDInput.peer_connection_config:type_name -> peerdb_peers.Peer - 65, // 24: peerdb_flow.EnsurePullabilityInput.peer_connection_config:type_name -> peerdb_peers.Peer - 65, // 25: peerdb_flow.EnsurePullabilityBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer - 21, // 26: peerdb_flow.TableIdentifier.postgres_table_identifier:type_name -> peerdb_flow.PostgresTableIdentifier - 22, // 27: peerdb_flow.EnsurePullabilityOutput.table_identifier:type_name -> peerdb_flow.TableIdentifier - 58, // 28: peerdb_flow.EnsurePullabilityBatchOutput.table_identifier_mapping:type_name -> peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry - 65, // 29: peerdb_flow.SetupReplicationInput.peer_connection_config:type_name -> peerdb_peers.Peer - 59, // 30: peerdb_flow.SetupReplicationInput.table_name_mapping:type_name -> peerdb_flow.SetupReplicationInput.TableNameMappingEntry - 65, // 31: peerdb_flow.SetupReplicationInput.destination_peer:type_name -> peerdb_peers.Peer - 65, // 32: peerdb_flow.CreateRawTableInput.peer_connection_config:type_name -> peerdb_peers.Peer - 60, // 33: peerdb_flow.CreateRawTableInput.table_name_mapping:type_name -> peerdb_flow.CreateRawTableInput.TableNameMappingEntry - 0, // 34: peerdb_flow.CreateRawTableInput.cdc_sync_mode:type_name -> peerdb_flow.QRepSyncMode - 61, // 35: peerdb_flow.TableSchema.columns:type_name -> peerdb_flow.TableSchema.ColumnsEntry - 65, // 36: peerdb_flow.GetTableSchemaBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer - 62, // 37: peerdb_flow.GetTableSchemaBatchOutput.table_name_schema_mapping:type_name -> peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry - 65, // 38: peerdb_flow.SetupNormalizedTableInput.peer_connection_config:type_name -> peerdb_peers.Peer - 29, // 39: peerdb_flow.SetupNormalizedTableInput.source_table_schema:type_name -> peerdb_flow.TableSchema - 65, // 40: peerdb_flow.SetupNormalizedTableBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer - 63, // 41: peerdb_flow.SetupNormalizedTableBatchInput.table_name_schema_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry - 64, // 42: peerdb_flow.SetupNormalizedTableBatchOutput.table_exists_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry - 66, // 43: peerdb_flow.TimestampPartitionRange.start:type_name -> google.protobuf.Timestamp - 66, // 44: peerdb_flow.TimestampPartitionRange.end:type_name -> google.protobuf.Timestamp - 38, // 45: peerdb_flow.TIDPartitionRange.start:type_name -> peerdb_flow.TID - 38, // 46: peerdb_flow.TIDPartitionRange.end:type_name -> peerdb_flow.TID - 36, // 47: peerdb_flow.PartitionRange.int_range:type_name -> peerdb_flow.IntPartitionRange - 37, // 48: peerdb_flow.PartitionRange.timestamp_range:type_name -> peerdb_flow.TimestampPartitionRange - 39, // 49: peerdb_flow.PartitionRange.tid_range:type_name -> peerdb_flow.TIDPartitionRange - 1, // 50: peerdb_flow.QRepWriteMode.write_type:type_name -> peerdb_flow.QRepWriteType - 65, // 51: peerdb_flow.QRepConfig.source_peer:type_name -> peerdb_peers.Peer - 65, // 52: peerdb_flow.QRepConfig.destination_peer:type_name -> 
peerdb_peers.Peer - 0, // 53: peerdb_flow.QRepConfig.sync_mode:type_name -> peerdb_flow.QRepSyncMode - 41, // 54: peerdb_flow.QRepConfig.write_mode:type_name -> peerdb_flow.QRepWriteMode - 40, // 55: peerdb_flow.QRepPartition.range:type_name -> peerdb_flow.PartitionRange - 43, // 56: peerdb_flow.QRepPartitionBatch.partitions:type_name -> peerdb_flow.QRepPartition - 43, // 57: peerdb_flow.QRepParitionResult.partitions:type_name -> peerdb_flow.QRepPartition - 47, // 58: peerdb_flow.TableSchemaDelta.added_columns:type_name -> peerdb_flow.DeltaAddedColumn - 7, // 59: peerdb_flow.ReplayTableSchemaDeltaInput.flow_connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs - 48, // 60: peerdb_flow.ReplayTableSchemaDeltaInput.table_schema_deltas:type_name -> peerdb_flow.TableSchemaDelta - 43, // 61: peerdb_flow.QRepFlowState.last_partition:type_name -> peerdb_flow.QRepPartition - 29, // 62: peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry.value:type_name -> peerdb_flow.TableSchema - 4, // 63: peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry.value:type_name -> peerdb_flow.RelationMessage - 4, // 64: peerdb_flow.StartFlowInput.RelationMessageMappingEntry.value:type_name -> peerdb_flow.RelationMessage - 22, // 65: peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry.value:type_name -> peerdb_flow.TableIdentifier - 29, // 66: peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry.value:type_name -> peerdb_flow.TableSchema - 29, // 67: peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry.value:type_name -> peerdb_flow.TableSchema - 68, // [68:68] is the sub-list for method output_type - 68, // [68:68] is the sub-list for method input_type - 68, // [68:68] is the sub-list for extension type_name - 68, // [68:68] is the sub-list for extension extendee - 0, // [0:68] is the sub-list for field type_name -} - -func init() { file_flow_proto_init() } -func file_flow_proto_init() { - if File_flow_proto != nil { - return - } - file_peers_proto_init() - if !protoimpl.UnsafeEnabled { - file_flow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableNameMapping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelationMessageColumn); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelationMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableMapping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlowConnectionConfigs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_flow_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RenameTableOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RenameTablesInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RenameTablesOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTablesFromExistingInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTablesFromExistingOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncFlowOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NormalizeFlowOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LastSyncState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartFlowInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartNormalizeInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLastSyncedIDInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnsurePullabilityInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnsurePullabilityBatchInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PostgresTableIdentifier); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableIdentifier); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnsurePullabilityOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnsurePullabilityBatchOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupReplicationInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupReplicationOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateRawTableInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateRawTableOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTableSchemaBatchInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTableSchemaBatchOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupNormalizedTableInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupNormalizedTableBatchInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupNormalizedTableOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetupNormalizedTableBatchOutput); i { - case 0: 
- return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IntPartitionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimestampPartitionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TIDPartitionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PartitionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepWriteMode); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepPartition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepPartitionBatch); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepParitionResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropFlowInput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeltaAddedColumn); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableSchemaDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayTableSchemaDeltaInput); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepFlowState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerDBColumns); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpenConnectionsForUserResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_flow_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_flow_proto_msgTypes[20].OneofWrappers = []interface{}{ - (*TableIdentifier_PostgresTableIdentifier)(nil), - } - file_flow_proto_msgTypes[38].OneofWrappers = []interface{}{ - (*PartitionRange_IntRange)(nil), - (*PartitionRange_TimestampRange)(nil), - (*PartitionRange_TidRange)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_flow_proto_rawDesc, - NumEnums: 2, - NumMessages: 63, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_flow_proto_goTypes, - DependencyIndexes: file_flow_proto_depIdxs, - EnumInfos: file_flow_proto_enumTypes, - MessageInfos: file_flow_proto_msgTypes, - }.Build() - File_flow_proto = out.File - file_flow_proto_rawDesc = nil - file_flow_proto_goTypes = nil - file_flow_proto_depIdxs = nil -} diff --git a/flow/generated/protos/google/api/annotations.pb.go b/flow/generated/protos/google/api/annotations.pb.go deleted file mode 100644 index 15175f0e89..0000000000 --- a/flow/generated/protos/google/api/annotations.pb.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: google/api/annotations.proto - -package api - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // See `HttpRule`. - // - // optional google.api.HttpRule http = 72295728; - E_Http = &file_google_api_annotations_proto_extTypes[0] -) - -var File_google_api_annotations_proto protoreflect.FileDescriptor - -var file_google_api_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, - 0x42, 0x88, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x58, 0xaa, 0x02, 0x0a, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x70, 0x69, 0xca, 0x02, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x41, 0x70, 0x69, 0xe2, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x70, - 0x69, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0b, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_google_api_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*HttpRule)(nil), // 1: google.api.HttpRule -} -var file_google_api_annotations_proto_depIdxs = []int32{ - 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.http:type_name -> google.api.HttpRule - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_annotations_proto_init() } -func file_google_api_annotations_proto_init() { - if File_google_api_annotations_proto != nil { - return - } - file_google_api_http_proto_init() - type x struct{} - out := 
protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_annotations_proto_goTypes, - DependencyIndexes: file_google_api_annotations_proto_depIdxs, - ExtensionInfos: file_google_api_annotations_proto_extTypes, - }.Build() - File_google_api_annotations_proto = out.File - file_google_api_annotations_proto_rawDesc = nil - file_google_api_annotations_proto_goTypes = nil - file_google_api_annotations_proto_depIdxs = nil -} diff --git a/flow/generated/protos/google/api/http.pb.go b/flow/generated/protos/google/api/http.pb.go deleted file mode 100644 index f2f45fe891..0000000000 --- a/flow/generated/protos/google/api/http.pb.go +++ /dev/null @@ -1,783 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: google/api/http.proto - -package api - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. 
- FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` -} - -func (x *Http) Reset() { - *x = Http{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http) ProtoMessage() {} - -func (x *Http) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http.ProtoReflect.Descriptor instead. -func (*Http) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{0} -} - -func (x *Http) GetRules() []*HttpRule { - if x != nil { - return x.Rules - } - return nil -} - -func (x *Http) GetFullyDecodeReservedExpansion() bool { - if x != nil { - return x.FullyDecodeReservedExpansion - } - return false -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. 
-// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A&param=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. 
Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They -// are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL -// query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP -// request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. 
-// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -type HttpRule struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax - // details. - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - // - // Types that are assignable to Pattern: - // - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). 
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` -} - -func (x *HttpRule) Reset() { - *x = HttpRule{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpRule) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpRule) ProtoMessage() {} - -func (x *HttpRule) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. -func (*HttpRule) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{1} -} - -func (x *HttpRule) GetSelector() string { - if x != nil { - return x.Selector - } - return "" -} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (x *HttpRule) GetGet() string { - if x, ok := x.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (x *HttpRule) GetPut() string { - if x, ok := x.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (x *HttpRule) GetPost() string { - if x, ok := x.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (x *HttpRule) GetDelete() string { - if x, ok := x.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (x *HttpRule) GetPatch() string { - if x, ok := x.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (x *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := x.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (x *HttpRule) GetBody() string { - if x != nil { - return x.Body - } - return "" -} - -func (x *HttpRule) GetResponseBody() string { - if x != nil { - return x.ResponseBody - } - return "" -} - -func (x *HttpRule) GetAdditionalBindings() []*HttpRule { - if x != nil { - return x.AdditionalBindings - } - return nil -} - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - // Maps to HTTP PUT. Used for replacing a resource. - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - // Maps to HTTP POST. Used for creating a resource or performing an action. - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} - -type HttpRule_Delete struct { - // Maps to HTTP DELETE. Used for deleting a resource. - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` -} - -type HttpRule_Patch struct { - // Maps to HTTP PATCH. Used for updating a resource. - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` -} - -type HttpRule_Custom struct { - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. 
- Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *CustomHttpPattern) Reset() { - *x = CustomHttpPattern{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomHttpPattern) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomHttpPattern) ProtoMessage() {} - -func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{2} -} - -func (x *CustomHttpPattern) GetKind() string { - if x != nil { - return x.Kind - } - return "" -} - -func (x *CustomHttpPattern) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_google_api_http_proto protoreflect.FileDescriptor - -var file_google_api_http_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, - 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, - 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, - 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, - 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x84, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x58, 0xaa, 0x02, - 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x70, 0x69, 0xca, 0x02, 0x0a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x70, 0x69, 0xe2, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x5c, 0x41, 0x70, 0x69, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0xea, 0x02, 0x0b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x70, 0x69, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_http_proto_rawDescOnce sync.Once - file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc -) - -func file_google_api_http_proto_rawDescGZIP() []byte { - file_google_api_http_proto_rawDescOnce.Do(func() { - file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) - }) - return file_google_api_http_proto_rawDescData -} - -var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_api_http_proto_goTypes = []interface{}{ - (*Http)(nil), // 0: google.api.Http - (*HttpRule)(nil), // 1: google.api.HttpRule - (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern -} -var 
file_google_api_http_proto_depIdxs = []int32{ - 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule - 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern - 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_google_api_http_proto_init() } -func file_google_api_http_proto_init() { - if File_google_api_http_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpRule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomHttpPattern); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_http_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_http_proto_goTypes, - DependencyIndexes: file_google_api_http_proto_depIdxs, - MessageInfos: file_google_api_http_proto_msgTypes, - }.Build() - File_google_api_http_proto = out.File - file_google_api_http_proto_rawDesc = nil - file_google_api_http_proto_goTypes = nil - file_google_api_http_proto_depIdxs = nil -} diff --git a/flow/generated/protos/peers.pb.go b/flow/generated/protos/peers.pb.go deleted file mode 100644 index aa03dcfd82..0000000000 --- a/flow/generated/protos/peers.pb.go +++ /dev/null @@ -1,1547 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: peers.proto - -package protos - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type DBType int32 - -const ( - DBType_BIGQUERY DBType = 0 - DBType_SNOWFLAKE DBType = 1 - DBType_MONGO DBType = 2 - DBType_POSTGRES DBType = 3 - DBType_EVENTHUB DBType = 4 - DBType_S3 DBType = 5 - DBType_SQLSERVER DBType = 6 - DBType_EVENTHUB_GROUP DBType = 7 -) - -// Enum value maps for DBType. 
-var ( - DBType_name = map[int32]string{ - 0: "BIGQUERY", - 1: "SNOWFLAKE", - 2: "MONGO", - 3: "POSTGRES", - 4: "EVENTHUB", - 5: "S3", - 6: "SQLSERVER", - 7: "EVENTHUB_GROUP", - } - DBType_value = map[string]int32{ - "BIGQUERY": 0, - "SNOWFLAKE": 1, - "MONGO": 2, - "POSTGRES": 3, - "EVENTHUB": 4, - "S3": 5, - "SQLSERVER": 6, - "EVENTHUB_GROUP": 7, - } -) - -func (x DBType) Enum() *DBType { - p := new(DBType) - *p = x - return p -} - -func (x DBType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (DBType) Descriptor() protoreflect.EnumDescriptor { - return file_peers_proto_enumTypes[0].Descriptor() -} - -func (DBType) Type() protoreflect.EnumType { - return &file_peers_proto_enumTypes[0] -} - -func (x DBType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use DBType.Descriptor instead. -func (DBType) EnumDescriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{0} -} - -type SSHConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` - Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` - PrivateKey string `protobuf:"bytes,5,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` -} - -func (x *SSHConfig) Reset() { - *x = SSHConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SSHConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SSHConfig) ProtoMessage() {} - -func (x *SSHConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SSHConfig.ProtoReflect.Descriptor instead. 
-func (*SSHConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{0} -} - -func (x *SSHConfig) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *SSHConfig) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *SSHConfig) GetUser() string { - if x != nil { - return x.User - } - return "" -} - -func (x *SSHConfig) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *SSHConfig) GetPrivateKey() string { - if x != nil { - return x.PrivateKey - } - return "" -} - -type SnowflakeConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - PrivateKey string `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` - Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"` - Warehouse string `protobuf:"bytes,6,opt,name=warehouse,proto3" json:"warehouse,omitempty"` - Role string `protobuf:"bytes,7,opt,name=role,proto3" json:"role,omitempty"` - QueryTimeout uint64 `protobuf:"varint,8,opt,name=query_timeout,json=queryTimeout,proto3" json:"query_timeout,omitempty"` - S3Integration string `protobuf:"bytes,9,opt,name=s3_integration,json=s3Integration,proto3" json:"s3_integration,omitempty"` - Password *string `protobuf:"bytes,10,opt,name=password,proto3,oneof" json:"password,omitempty"` - // defaults to _PEERDB_INTERNAL - MetadataSchema *string `protobuf:"bytes,11,opt,name=metadata_schema,json=metadataSchema,proto3,oneof" json:"metadata_schema,omitempty"` -} - -func (x *SnowflakeConfig) Reset() { - *x = SnowflakeConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SnowflakeConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SnowflakeConfig) ProtoMessage() {} - -func (x *SnowflakeConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SnowflakeConfig.ProtoReflect.Descriptor instead. 
-func (*SnowflakeConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{1} -} - -func (x *SnowflakeConfig) GetAccountId() string { - if x != nil { - return x.AccountId - } - return "" -} - -func (x *SnowflakeConfig) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *SnowflakeConfig) GetPrivateKey() string { - if x != nil { - return x.PrivateKey - } - return "" -} - -func (x *SnowflakeConfig) GetDatabase() string { - if x != nil { - return x.Database - } - return "" -} - -func (x *SnowflakeConfig) GetWarehouse() string { - if x != nil { - return x.Warehouse - } - return "" -} - -func (x *SnowflakeConfig) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *SnowflakeConfig) GetQueryTimeout() uint64 { - if x != nil { - return x.QueryTimeout - } - return 0 -} - -func (x *SnowflakeConfig) GetS3Integration() string { - if x != nil { - return x.S3Integration - } - return "" -} - -func (x *SnowflakeConfig) GetPassword() string { - if x != nil && x.Password != nil { - return *x.Password - } - return "" -} - -func (x *SnowflakeConfig) GetMetadataSchema() string { - if x != nil && x.MetadataSchema != nil { - return *x.MetadataSchema - } - return "" -} - -type BigqueryConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AuthType string `protobuf:"bytes,1,opt,name=auth_type,json=authType,proto3" json:"auth_type,omitempty"` - ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - PrivateKeyId string `protobuf:"bytes,3,opt,name=private_key_id,json=privateKeyId,proto3" json:"private_key_id,omitempty"` - PrivateKey string `protobuf:"bytes,4,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` - ClientEmail string `protobuf:"bytes,5,opt,name=client_email,json=clientEmail,proto3" json:"client_email,omitempty"` - ClientId string `protobuf:"bytes,6,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - AuthUri string `protobuf:"bytes,7,opt,name=auth_uri,json=authUri,proto3" json:"auth_uri,omitempty"` - TokenUri string `protobuf:"bytes,8,opt,name=token_uri,json=tokenUri,proto3" json:"token_uri,omitempty"` - AuthProviderX509CertUrl string `protobuf:"bytes,9,opt,name=auth_provider_x509_cert_url,json=authProviderX509CertUrl,proto3" json:"auth_provider_x509_cert_url,omitempty"` - ClientX509CertUrl string `protobuf:"bytes,10,opt,name=client_x509_cert_url,json=clientX509CertUrl,proto3" json:"client_x509_cert_url,omitempty"` - DatasetId string `protobuf:"bytes,11,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` -} - -func (x *BigqueryConfig) Reset() { - *x = BigqueryConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BigqueryConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BigqueryConfig) ProtoMessage() {} - -func (x *BigqueryConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BigqueryConfig.ProtoReflect.Descriptor instead. 
-func (*BigqueryConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{2} -} - -func (x *BigqueryConfig) GetAuthType() string { - if x != nil { - return x.AuthType - } - return "" -} - -func (x *BigqueryConfig) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -func (x *BigqueryConfig) GetPrivateKeyId() string { - if x != nil { - return x.PrivateKeyId - } - return "" -} - -func (x *BigqueryConfig) GetPrivateKey() string { - if x != nil { - return x.PrivateKey - } - return "" -} - -func (x *BigqueryConfig) GetClientEmail() string { - if x != nil { - return x.ClientEmail - } - return "" -} - -func (x *BigqueryConfig) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *BigqueryConfig) GetAuthUri() string { - if x != nil { - return x.AuthUri - } - return "" -} - -func (x *BigqueryConfig) GetTokenUri() string { - if x != nil { - return x.TokenUri - } - return "" -} - -func (x *BigqueryConfig) GetAuthProviderX509CertUrl() string { - if x != nil { - return x.AuthProviderX509CertUrl - } - return "" -} - -func (x *BigqueryConfig) GetClientX509CertUrl() string { - if x != nil { - return x.ClientX509CertUrl - } - return "" -} - -func (x *BigqueryConfig) GetDatasetId() string { - if x != nil { - return x.DatasetId - } - return "" -} - -type MongoConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Clusterurl string `protobuf:"bytes,3,opt,name=clusterurl,proto3" json:"clusterurl,omitempty"` - Clusterport int32 `protobuf:"varint,4,opt,name=clusterport,proto3" json:"clusterport,omitempty"` - Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` -} - -func (x *MongoConfig) Reset() { - *x = MongoConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MongoConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MongoConfig) ProtoMessage() {} - -func (x *MongoConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MongoConfig.ProtoReflect.Descriptor instead. 
-func (*MongoConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{3} -} - -func (x *MongoConfig) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *MongoConfig) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *MongoConfig) GetClusterurl() string { - if x != nil { - return x.Clusterurl - } - return "" -} - -func (x *MongoConfig) GetClusterport() int32 { - if x != nil { - return x.Clusterport - } - return 0 -} - -func (x *MongoConfig) GetDatabase() string { - if x != nil { - return x.Database - } - return "" -} - -type PostgresConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` - Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` - Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` - // this is used only in query replication mode right now. - TransactionSnapshot string `protobuf:"bytes,6,opt,name=transaction_snapshot,json=transactionSnapshot,proto3" json:"transaction_snapshot,omitempty"` - // defaults to _peerdb_internal - MetadataSchema *string `protobuf:"bytes,7,opt,name=metadata_schema,json=metadataSchema,proto3,oneof" json:"metadata_schema,omitempty"` - SshConfig *SSHConfig `protobuf:"bytes,8,opt,name=ssh_config,json=sshConfig,proto3,oneof" json:"ssh_config,omitempty"` -} - -func (x *PostgresConfig) Reset() { - *x = PostgresConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PostgresConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PostgresConfig) ProtoMessage() {} - -func (x *PostgresConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PostgresConfig.ProtoReflect.Descriptor instead. 
-func (*PostgresConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{4} -} - -func (x *PostgresConfig) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *PostgresConfig) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *PostgresConfig) GetUser() string { - if x != nil { - return x.User - } - return "" -} - -func (x *PostgresConfig) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *PostgresConfig) GetDatabase() string { - if x != nil { - return x.Database - } - return "" -} - -func (x *PostgresConfig) GetTransactionSnapshot() string { - if x != nil { - return x.TransactionSnapshot - } - return "" -} - -func (x *PostgresConfig) GetMetadataSchema() string { - if x != nil && x.MetadataSchema != nil { - return *x.MetadataSchema - } - return "" -} - -func (x *PostgresConfig) GetSshConfig() *SSHConfig { - if x != nil { - return x.SshConfig - } - return nil -} - -type EventHubConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - ResourceGroup string `protobuf:"bytes,2,opt,name=resource_group,json=resourceGroup,proto3" json:"resource_group,omitempty"` - Location string `protobuf:"bytes,3,opt,name=location,proto3" json:"location,omitempty"` - MetadataDb *PostgresConfig `protobuf:"bytes,4,opt,name=metadata_db,json=metadataDb,proto3" json:"metadata_db,omitempty"` - // if this is empty PeerDB uses `AZURE_SUBSCRIPTION_ID` environment variable. - SubscriptionId string `protobuf:"bytes,5,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` - // defaults to 3 - PartitionCount uint32 `protobuf:"varint,6,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` - // defaults to 7 - MessageRetentionInDays uint32 `protobuf:"varint,7,opt,name=message_retention_in_days,json=messageRetentionInDays,proto3" json:"message_retention_in_days,omitempty"` -} - -func (x *EventHubConfig) Reset() { - *x = EventHubConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventHubConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventHubConfig) ProtoMessage() {} - -func (x *EventHubConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventHubConfig.ProtoReflect.Descriptor instead. 
-func (*EventHubConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{5} -} - -func (x *EventHubConfig) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *EventHubConfig) GetResourceGroup() string { - if x != nil { - return x.ResourceGroup - } - return "" -} - -func (x *EventHubConfig) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *EventHubConfig) GetMetadataDb() *PostgresConfig { - if x != nil { - return x.MetadataDb - } - return nil -} - -func (x *EventHubConfig) GetSubscriptionId() string { - if x != nil { - return x.SubscriptionId - } - return "" -} - -func (x *EventHubConfig) GetPartitionCount() uint32 { - if x != nil { - return x.PartitionCount - } - return 0 -} - -func (x *EventHubConfig) GetMessageRetentionInDays() uint32 { - if x != nil { - return x.MessageRetentionInDays - } - return 0 -} - -type EventHubGroupConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // event hub peer name to event hub config - Eventhubs map[string]*EventHubConfig `protobuf:"bytes,1,rep,name=eventhubs,proto3" json:"eventhubs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - MetadataDb *PostgresConfig `protobuf:"bytes,2,opt,name=metadata_db,json=metadataDb,proto3" json:"metadata_db,omitempty"` - UnnestColumns []string `protobuf:"bytes,3,rep,name=unnest_columns,json=unnestColumns,proto3" json:"unnest_columns,omitempty"` -} - -func (x *EventHubGroupConfig) Reset() { - *x = EventHubGroupConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventHubGroupConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventHubGroupConfig) ProtoMessage() {} - -func (x *EventHubGroupConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventHubGroupConfig.ProtoReflect.Descriptor instead. 
-func (*EventHubGroupConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{6} -} - -func (x *EventHubGroupConfig) GetEventhubs() map[string]*EventHubConfig { - if x != nil { - return x.Eventhubs - } - return nil -} - -func (x *EventHubGroupConfig) GetMetadataDb() *PostgresConfig { - if x != nil { - return x.MetadataDb - } - return nil -} - -func (x *EventHubGroupConfig) GetUnnestColumns() []string { - if x != nil { - return x.UnnestColumns - } - return nil -} - -type S3Config struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - AccessKeyId *string `protobuf:"bytes,2,opt,name=access_key_id,json=accessKeyId,proto3,oneof" json:"access_key_id,omitempty"` - SecretAccessKey *string `protobuf:"bytes,3,opt,name=secret_access_key,json=secretAccessKey,proto3,oneof" json:"secret_access_key,omitempty"` - RoleArn *string `protobuf:"bytes,4,opt,name=role_arn,json=roleArn,proto3,oneof" json:"role_arn,omitempty"` - Region *string `protobuf:"bytes,5,opt,name=region,proto3,oneof" json:"region,omitempty"` - Endpoint *string `protobuf:"bytes,6,opt,name=endpoint,proto3,oneof" json:"endpoint,omitempty"` - MetadataDb *PostgresConfig `protobuf:"bytes,7,opt,name=metadata_db,json=metadataDb,proto3" json:"metadata_db,omitempty"` -} - -func (x *S3Config) Reset() { - *x = S3Config{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *S3Config) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S3Config) ProtoMessage() {} - -func (x *S3Config) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
-func (*S3Config) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{7} -} - -func (x *S3Config) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *S3Config) GetAccessKeyId() string { - if x != nil && x.AccessKeyId != nil { - return *x.AccessKeyId - } - return "" -} - -func (x *S3Config) GetSecretAccessKey() string { - if x != nil && x.SecretAccessKey != nil { - return *x.SecretAccessKey - } - return "" -} - -func (x *S3Config) GetRoleArn() string { - if x != nil && x.RoleArn != nil { - return *x.RoleArn - } - return "" -} - -func (x *S3Config) GetRegion() string { - if x != nil && x.Region != nil { - return *x.Region - } - return "" -} - -func (x *S3Config) GetEndpoint() string { - if x != nil && x.Endpoint != nil { - return *x.Endpoint - } - return "" -} - -func (x *S3Config) GetMetadataDb() *PostgresConfig { - if x != nil { - return x.MetadataDb - } - return nil -} - -type SqlServerConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` - Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` - Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` -} - -func (x *SqlServerConfig) Reset() { - *x = SqlServerConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SqlServerConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SqlServerConfig) ProtoMessage() {} - -func (x *SqlServerConfig) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SqlServerConfig.ProtoReflect.Descriptor instead. 
-func (*SqlServerConfig) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{8} -} - -func (x *SqlServerConfig) GetServer() string { - if x != nil { - return x.Server - } - return "" -} - -func (x *SqlServerConfig) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *SqlServerConfig) GetUser() string { - if x != nil { - return x.User - } - return "" -} - -func (x *SqlServerConfig) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *SqlServerConfig) GetDatabase() string { - if x != nil { - return x.Database - } - return "" -} - -type Peer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type DBType `protobuf:"varint,2,opt,name=type,proto3,enum=peerdb_peers.DBType" json:"type,omitempty"` - // Types that are assignable to Config: - // - // *Peer_SnowflakeConfig - // *Peer_BigqueryConfig - // *Peer_MongoConfig - // *Peer_PostgresConfig - // *Peer_EventhubConfig - // *Peer_S3Config - // *Peer_SqlserverConfig - // *Peer_EventhubGroupConfig - Config isPeer_Config `protobuf_oneof:"config"` -} - -func (x *Peer) Reset() { - *x = Peer{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Peer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Peer) ProtoMessage() {} - -func (x *Peer) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Peer.ProtoReflect.Descriptor instead. 
-func (*Peer) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{9} -} - -func (x *Peer) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Peer) GetType() DBType { - if x != nil { - return x.Type - } - return DBType_BIGQUERY -} - -func (m *Peer) GetConfig() isPeer_Config { - if m != nil { - return m.Config - } - return nil -} - -func (x *Peer) GetSnowflakeConfig() *SnowflakeConfig { - if x, ok := x.GetConfig().(*Peer_SnowflakeConfig); ok { - return x.SnowflakeConfig - } - return nil -} - -func (x *Peer) GetBigqueryConfig() *BigqueryConfig { - if x, ok := x.GetConfig().(*Peer_BigqueryConfig); ok { - return x.BigqueryConfig - } - return nil -} - -func (x *Peer) GetMongoConfig() *MongoConfig { - if x, ok := x.GetConfig().(*Peer_MongoConfig); ok { - return x.MongoConfig - } - return nil -} - -func (x *Peer) GetPostgresConfig() *PostgresConfig { - if x, ok := x.GetConfig().(*Peer_PostgresConfig); ok { - return x.PostgresConfig - } - return nil -} - -func (x *Peer) GetEventhubConfig() *EventHubConfig { - if x, ok := x.GetConfig().(*Peer_EventhubConfig); ok { - return x.EventhubConfig - } - return nil -} - -func (x *Peer) GetS3Config() *S3Config { - if x, ok := x.GetConfig().(*Peer_S3Config); ok { - return x.S3Config - } - return nil -} - -func (x *Peer) GetSqlserverConfig() *SqlServerConfig { - if x, ok := x.GetConfig().(*Peer_SqlserverConfig); ok { - return x.SqlserverConfig - } - return nil -} - -func (x *Peer) GetEventhubGroupConfig() *EventHubGroupConfig { - if x, ok := x.GetConfig().(*Peer_EventhubGroupConfig); ok { - return x.EventhubGroupConfig - } - return nil -} - -type isPeer_Config interface { - isPeer_Config() -} - -type Peer_SnowflakeConfig struct { - SnowflakeConfig *SnowflakeConfig `protobuf:"bytes,3,opt,name=snowflake_config,json=snowflakeConfig,proto3,oneof"` -} - -type Peer_BigqueryConfig struct { - BigqueryConfig *BigqueryConfig `protobuf:"bytes,4,opt,name=bigquery_config,json=bigqueryConfig,proto3,oneof"` -} - -type Peer_MongoConfig struct { - MongoConfig *MongoConfig `protobuf:"bytes,5,opt,name=mongo_config,json=mongoConfig,proto3,oneof"` -} - -type Peer_PostgresConfig struct { - PostgresConfig *PostgresConfig `protobuf:"bytes,6,opt,name=postgres_config,json=postgresConfig,proto3,oneof"` -} - -type Peer_EventhubConfig struct { - EventhubConfig *EventHubConfig `protobuf:"bytes,7,opt,name=eventhub_config,json=eventhubConfig,proto3,oneof"` -} - -type Peer_S3Config struct { - S3Config *S3Config `protobuf:"bytes,8,opt,name=s3_config,json=s3Config,proto3,oneof"` -} - -type Peer_SqlserverConfig struct { - SqlserverConfig *SqlServerConfig `protobuf:"bytes,9,opt,name=sqlserver_config,json=sqlserverConfig,proto3,oneof"` -} - -type Peer_EventhubGroupConfig struct { - EventhubGroupConfig *EventHubGroupConfig `protobuf:"bytes,10,opt,name=eventhub_group_config,json=eventhubGroupConfig,proto3,oneof"` -} - -func (*Peer_SnowflakeConfig) isPeer_Config() {} - -func (*Peer_BigqueryConfig) isPeer_Config() {} - -func (*Peer_MongoConfig) isPeer_Config() {} - -func (*Peer_PostgresConfig) isPeer_Config() {} - -func (*Peer_EventhubConfig) isPeer_Config() {} - -func (*Peer_S3Config) isPeer_Config() {} - -func (*Peer_SqlserverConfig) isPeer_Config() {} - -func (*Peer_EventhubGroupConfig) isPeer_Config() {} - -var File_peers_proto protoreflect.FileDescriptor - -var file_peers_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 
0x65, 0x72, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x09, - 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x22, 0xf7, 0x02, 0x0a, 0x0f, 0x53, 0x6e, 0x6f, 0x77, 0x66, 0x6c, 0x61, 0x6b, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x77, 0x61, 0x72, 0x65, 0x68, 0x6f, 0x75, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x77, 0x61, 0x72, 0x65, 0x68, 0x6f, 0x75, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x33, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, - 0x33, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, - 0x0f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x99, 0x03, 0x0a, - 0x0e, 0x42, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x1b, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x49, - 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x75, 0x74, 0x68, 0x55, 0x72, 0x69, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x3c, 0x0a, 0x1b, 0x61, 0x75, - 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x78, 0x35, 0x30, 0x39, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x17, 0x61, 0x75, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x58, 0x35, 0x30, - 0x39, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x6c, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, 0x72, 0x6c, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x58, 0x35, - 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, - 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, - 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x22, 0xa3, 0x01, 0x0a, 0x0b, 0x4d, 0x6f, 0x6e, - 0x67, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x75, 0x72, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x75, 0x72, 0x6c, - 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x70, 0x6f, - 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0xc5, - 0x02, 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, - 
0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x73, 0x73, 0x68, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbd, 0x02, 0x0a, 0x0e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x48, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1a, - 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, - 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x62, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x70, 0x61, 0x72, - 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x44, 0x61, 
0x79, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x13, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x48, 0x75, 0x62, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, - 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x73, 0x12, 0x3d, - 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x62, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, - 0x72, 0x73, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x62, 0x12, 0x25, 0x0a, - 0x0e, 0x75, 0x6e, 0x6e, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x75, 0x6e, 0x6e, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xe0, 0x02, 0x0a, 0x08, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, - 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, - 0x27, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x4b, 0x65, 0x79, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x0f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x72, 0x6f, 0x6c, - 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x07, 0x72, - 0x6f, 0x6c, 0x65, 0x41, 0x72, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x67, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x06, 0x72, 0x65, 0x67, - 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x64, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, 0x6f, 0x73, 0x74, - 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x44, 0x62, 0x42, 0x10, 
0x0a, 0x0e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x0b, - 0x0a, 0x09, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x53, 0x71, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, - 0x91, 0x05, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4a, 0x0a, 0x10, 0x73, 0x6e, 0x6f, 0x77, 0x66, 0x6c, - 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, - 0x53, 0x6e, 0x6f, 0x77, 0x66, 0x6c, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x0f, 0x73, 0x6e, 0x6f, 0x77, 0x66, 0x6c, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x42, 0x69, 0x67, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x69, 0x67, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0c, 0x6d, - 0x6f, 0x6e, 0x67, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x2e, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, - 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0f, 0x70, - 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, - 0x65, 0x72, 0x73, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 
0x74, 0x68, 0x75, 0x62, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0e, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, - 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x16, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, - 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x10, 0x73, 0x71, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x53, 0x71, - 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x0f, 0x73, 0x71, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x57, 0x0a, 0x15, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x13, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x68, 0x75, 0x62, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2a, 0x77, 0x0a, 0x06, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, - 0x08, 0x42, 0x49, 0x47, 0x51, 0x55, 0x45, 0x52, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, - 0x4e, 0x4f, 0x57, 0x46, 0x4c, 0x41, 0x4b, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, - 0x4e, 0x47, 0x4f, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x4f, 0x53, 0x54, 0x47, 0x52, 0x45, - 0x53, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x48, 0x55, 0x42, 0x10, - 0x04, 0x12, 0x06, 0x0a, 0x02, 0x53, 0x33, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x51, 0x4c, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x06, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x48, 0x55, 0x42, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x07, 0x42, 0x7c, 0x0a, 0x10, - 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x42, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x10, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, - 0xa2, 0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x50, - 0x65, 0x65, 0x72, 0x73, 0xca, 0x02, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x50, 0x65, 0x65, - 0x72, 0x73, 0xe2, 0x02, 0x17, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0b, 0x50, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x50, 0x65, 0x65, 0x72, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_peers_proto_rawDescOnce sync.Once - file_peers_proto_rawDescData = file_peers_proto_rawDesc -) - -func file_peers_proto_rawDescGZIP() []byte { - 
file_peers_proto_rawDescOnce.Do(func() { - file_peers_proto_rawDescData = protoimpl.X.CompressGZIP(file_peers_proto_rawDescData) - }) - return file_peers_proto_rawDescData -} - -var file_peers_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_peers_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_peers_proto_goTypes = []interface{}{ - (DBType)(0), // 0: peerdb_peers.DBType - (*SSHConfig)(nil), // 1: peerdb_peers.SSHConfig - (*SnowflakeConfig)(nil), // 2: peerdb_peers.SnowflakeConfig - (*BigqueryConfig)(nil), // 3: peerdb_peers.BigqueryConfig - (*MongoConfig)(nil), // 4: peerdb_peers.MongoConfig - (*PostgresConfig)(nil), // 5: peerdb_peers.PostgresConfig - (*EventHubConfig)(nil), // 6: peerdb_peers.EventHubConfig - (*EventHubGroupConfig)(nil), // 7: peerdb_peers.EventHubGroupConfig - (*S3Config)(nil), // 8: peerdb_peers.S3Config - (*SqlServerConfig)(nil), // 9: peerdb_peers.SqlServerConfig - (*Peer)(nil), // 10: peerdb_peers.Peer - nil, // 11: peerdb_peers.EventHubGroupConfig.EventhubsEntry -} -var file_peers_proto_depIdxs = []int32{ - 1, // 0: peerdb_peers.PostgresConfig.ssh_config:type_name -> peerdb_peers.SSHConfig - 5, // 1: peerdb_peers.EventHubConfig.metadata_db:type_name -> peerdb_peers.PostgresConfig - 11, // 2: peerdb_peers.EventHubGroupConfig.eventhubs:type_name -> peerdb_peers.EventHubGroupConfig.EventhubsEntry - 5, // 3: peerdb_peers.EventHubGroupConfig.metadata_db:type_name -> peerdb_peers.PostgresConfig - 5, // 4: peerdb_peers.S3Config.metadata_db:type_name -> peerdb_peers.PostgresConfig - 0, // 5: peerdb_peers.Peer.type:type_name -> peerdb_peers.DBType - 2, // 6: peerdb_peers.Peer.snowflake_config:type_name -> peerdb_peers.SnowflakeConfig - 3, // 7: peerdb_peers.Peer.bigquery_config:type_name -> peerdb_peers.BigqueryConfig - 4, // 8: peerdb_peers.Peer.mongo_config:type_name -> peerdb_peers.MongoConfig - 5, // 9: peerdb_peers.Peer.postgres_config:type_name -> peerdb_peers.PostgresConfig - 6, // 10: peerdb_peers.Peer.eventhub_config:type_name -> peerdb_peers.EventHubConfig - 8, // 11: peerdb_peers.Peer.s3_config:type_name -> peerdb_peers.S3Config - 9, // 12: peerdb_peers.Peer.sqlserver_config:type_name -> peerdb_peers.SqlServerConfig - 7, // 13: peerdb_peers.Peer.eventhub_group_config:type_name -> peerdb_peers.EventHubGroupConfig - 6, // 14: peerdb_peers.EventHubGroupConfig.EventhubsEntry.value:type_name -> peerdb_peers.EventHubConfig - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name -} - -func init() { file_peers_proto_init() } -func file_peers_proto_init() { - if File_peers_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_peers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnowflakeConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BigqueryConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_peers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MongoConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PostgresConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventHubConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventHubGroupConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S3Config); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SqlServerConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Peer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_peers_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_peers_proto_msgTypes[4].OneofWrappers = []interface{}{} - file_peers_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_peers_proto_msgTypes[9].OneofWrappers = []interface{}{ - (*Peer_SnowflakeConfig)(nil), - (*Peer_BigqueryConfig)(nil), - (*Peer_MongoConfig)(nil), - (*Peer_PostgresConfig)(nil), - (*Peer_EventhubConfig)(nil), - (*Peer_S3Config)(nil), - (*Peer_SqlserverConfig)(nil), - (*Peer_EventhubGroupConfig)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_peers_proto_rawDesc, - NumEnums: 1, - NumMessages: 11, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_peers_proto_goTypes, - DependencyIndexes: file_peers_proto_depIdxs, - EnumInfos: file_peers_proto_enumTypes, - MessageInfos: file_peers_proto_msgTypes, - }.Build() - File_peers_proto = out.File - file_peers_proto_rawDesc = nil - file_peers_proto_goTypes = nil - file_peers_proto_depIdxs = nil -} diff --git a/flow/generated/protos/route.pb.go b/flow/generated/protos/route.pb.go deleted file mode 100644 index 5603d4877d..0000000000 --- a/flow/generated/protos/route.pb.go +++ /dev/null @@ -1,3039 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: route.proto - -package protos - -import ( - _ "google.golang.org/genproto/googleapis/api/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ValidatePeerStatus int32 - -const ( - ValidatePeerStatus_CREATION_UNKNOWN ValidatePeerStatus = 0 - ValidatePeerStatus_VALID ValidatePeerStatus = 1 - ValidatePeerStatus_INVALID ValidatePeerStatus = 2 -) - -// Enum value maps for ValidatePeerStatus. -var ( - ValidatePeerStatus_name = map[int32]string{ - 0: "CREATION_UNKNOWN", - 1: "VALID", - 2: "INVALID", - } - ValidatePeerStatus_value = map[string]int32{ - "CREATION_UNKNOWN": 0, - "VALID": 1, - "INVALID": 2, - } -) - -func (x ValidatePeerStatus) Enum() *ValidatePeerStatus { - p := new(ValidatePeerStatus) - *p = x - return p -} - -func (x ValidatePeerStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ValidatePeerStatus) Descriptor() protoreflect.EnumDescriptor { - return file_route_proto_enumTypes[0].Descriptor() -} - -func (ValidatePeerStatus) Type() protoreflect.EnumType { - return &file_route_proto_enumTypes[0] -} - -func (x ValidatePeerStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ValidatePeerStatus.Descriptor instead. -func (ValidatePeerStatus) EnumDescriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{0} -} - -type CreatePeerStatus int32 - -const ( - CreatePeerStatus_VALIDATION_UNKNOWN CreatePeerStatus = 0 - CreatePeerStatus_CREATED CreatePeerStatus = 1 - CreatePeerStatus_FAILED CreatePeerStatus = 2 -) - -// Enum value maps for CreatePeerStatus. -var ( - CreatePeerStatus_name = map[int32]string{ - 0: "VALIDATION_UNKNOWN", - 1: "CREATED", - 2: "FAILED", - } - CreatePeerStatus_value = map[string]int32{ - "VALIDATION_UNKNOWN": 0, - "CREATED": 1, - "FAILED": 2, - } -) - -func (x CreatePeerStatus) Enum() *CreatePeerStatus { - p := new(CreatePeerStatus) - *p = x - return p -} - -func (x CreatePeerStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CreatePeerStatus) Descriptor() protoreflect.EnumDescriptor { - return file_route_proto_enumTypes[1].Descriptor() -} - -func (CreatePeerStatus) Type() protoreflect.EnumType { - return &file_route_proto_enumTypes[1] -} - -func (x CreatePeerStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CreatePeerStatus.Descriptor instead. -func (CreatePeerStatus) EnumDescriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{1} -} - -// in the future, consider moving DropFlow to this and reduce route surface -type FlowState int32 - -const ( - FlowState_STATE_UNKNOWN FlowState = 0 - FlowState_STATE_RUNNING FlowState = 1 - FlowState_STATE_PAUSED FlowState = 2 -) - -// Enum value maps for FlowState. 
-var ( - FlowState_name = map[int32]string{ - 0: "STATE_UNKNOWN", - 1: "STATE_RUNNING", - 2: "STATE_PAUSED", - } - FlowState_value = map[string]int32{ - "STATE_UNKNOWN": 0, - "STATE_RUNNING": 1, - "STATE_PAUSED": 2, - } -) - -func (x FlowState) Enum() *FlowState { - p := new(FlowState) - *p = x - return p -} - -func (x FlowState) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FlowState) Descriptor() protoreflect.EnumDescriptor { - return file_route_proto_enumTypes[2].Descriptor() -} - -func (FlowState) Type() protoreflect.EnumType { - return &file_route_proto_enumTypes[2] -} - -func (x FlowState) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FlowState.Descriptor instead. -func (FlowState) EnumDescriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{2} -} - -type CreateCDCFlowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ConnectionConfigs *FlowConnectionConfigs `protobuf:"bytes,1,opt,name=connection_configs,json=connectionConfigs,proto3" json:"connection_configs,omitempty"` - CreateCatalogEntry bool `protobuf:"varint,2,opt,name=create_catalog_entry,json=createCatalogEntry,proto3" json:"create_catalog_entry,omitempty"` -} - -func (x *CreateCDCFlowRequest) Reset() { - *x = CreateCDCFlowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateCDCFlowRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateCDCFlowRequest) ProtoMessage() {} - -func (x *CreateCDCFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateCDCFlowRequest.ProtoReflect.Descriptor instead. -func (*CreateCDCFlowRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{0} -} - -func (x *CreateCDCFlowRequest) GetConnectionConfigs() *FlowConnectionConfigs { - if x != nil { - return x.ConnectionConfigs - } - return nil -} - -func (x *CreateCDCFlowRequest) GetCreateCatalogEntry() bool { - if x != nil { - return x.CreateCatalogEntry - } - return false -} - -type CreateCDCFlowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WorflowId string `protobuf:"bytes,1,opt,name=worflow_id,json=worflowId,proto3" json:"worflow_id,omitempty"` -} - -func (x *CreateCDCFlowResponse) Reset() { - *x = CreateCDCFlowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateCDCFlowResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateCDCFlowResponse) ProtoMessage() {} - -func (x *CreateCDCFlowResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateCDCFlowResponse.ProtoReflect.Descriptor instead. 
-func (*CreateCDCFlowResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{1} -} - -func (x *CreateCDCFlowResponse) GetWorflowId() string { - if x != nil { - return x.WorflowId - } - return "" -} - -type CreateQRepFlowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - QrepConfig *QRepConfig `protobuf:"bytes,1,opt,name=qrep_config,json=qrepConfig,proto3" json:"qrep_config,omitempty"` - CreateCatalogEntry bool `protobuf:"varint,2,opt,name=create_catalog_entry,json=createCatalogEntry,proto3" json:"create_catalog_entry,omitempty"` -} - -func (x *CreateQRepFlowRequest) Reset() { - *x = CreateQRepFlowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateQRepFlowRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateQRepFlowRequest) ProtoMessage() {} - -func (x *CreateQRepFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateQRepFlowRequest.ProtoReflect.Descriptor instead. -func (*CreateQRepFlowRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{2} -} - -func (x *CreateQRepFlowRequest) GetQrepConfig() *QRepConfig { - if x != nil { - return x.QrepConfig - } - return nil -} - -func (x *CreateQRepFlowRequest) GetCreateCatalogEntry() bool { - if x != nil { - return x.CreateCatalogEntry - } - return false -} - -type CreateQRepFlowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WorflowId string `protobuf:"bytes,1,opt,name=worflow_id,json=worflowId,proto3" json:"worflow_id,omitempty"` -} - -func (x *CreateQRepFlowResponse) Reset() { - *x = CreateQRepFlowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateQRepFlowResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateQRepFlowResponse) ProtoMessage() {} - -func (x *CreateQRepFlowResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateQRepFlowResponse.ProtoReflect.Descriptor instead. 
-func (*CreateQRepFlowResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{3} -} - -func (x *CreateQRepFlowResponse) GetWorflowId() string { - if x != nil { - return x.WorflowId - } - return "" -} - -type ShutdownRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - SourcePeer *Peer `protobuf:"bytes,3,opt,name=source_peer,json=sourcePeer,proto3" json:"source_peer,omitempty"` - DestinationPeer *Peer `protobuf:"bytes,4,opt,name=destination_peer,json=destinationPeer,proto3" json:"destination_peer,omitempty"` - RemoveFlowEntry bool `protobuf:"varint,5,opt,name=remove_flow_entry,json=removeFlowEntry,proto3" json:"remove_flow_entry,omitempty"` -} - -func (x *ShutdownRequest) Reset() { - *x = ShutdownRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShutdownRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShutdownRequest) ProtoMessage() {} - -func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. -func (*ShutdownRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{4} -} - -func (x *ShutdownRequest) GetWorkflowId() string { - if x != nil { - return x.WorkflowId - } - return "" -} - -func (x *ShutdownRequest) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *ShutdownRequest) GetSourcePeer() *Peer { - if x != nil { - return x.SourcePeer - } - return nil -} - -func (x *ShutdownRequest) GetDestinationPeer() *Peer { - if x != nil { - return x.DestinationPeer - } - return nil -} - -func (x *ShutdownRequest) GetRemoveFlowEntry() bool { - if x != nil { - return x.RemoveFlowEntry - } - return false -} - -type ShutdownResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *ShutdownResponse) Reset() { - *x = ShutdownResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShutdownResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShutdownResponse) ProtoMessage() {} - -func (x *ShutdownResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShutdownResponse.ProtoReflect.Descriptor instead. 
-func (*ShutdownResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{5} -} - -func (x *ShutdownResponse) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *ShutdownResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -type ValidatePeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Peer *Peer `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` -} - -func (x *ValidatePeerRequest) Reset() { - *x = ValidatePeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerRequest) ProtoMessage() {} - -func (x *ValidatePeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerRequest.ProtoReflect.Descriptor instead. -func (*ValidatePeerRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{6} -} - -func (x *ValidatePeerRequest) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -type CreatePeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Peer *Peer `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` -} - -func (x *CreatePeerRequest) Reset() { - *x = CreatePeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreatePeerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreatePeerRequest) ProtoMessage() {} - -func (x *CreatePeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreatePeerRequest.ProtoReflect.Descriptor instead. 
-func (*CreatePeerRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{7} -} - -func (x *CreatePeerRequest) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -type DropPeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerName string `protobuf:"bytes,1,opt,name=peer_name,json=peerName,proto3" json:"peer_name,omitempty"` -} - -func (x *DropPeerRequest) Reset() { - *x = DropPeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropPeerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropPeerRequest) ProtoMessage() {} - -func (x *DropPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropPeerRequest.ProtoReflect.Descriptor instead. -func (*DropPeerRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{8} -} - -func (x *DropPeerRequest) GetPeerName() string { - if x != nil { - return x.PeerName - } - return "" -} - -type DropPeerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *DropPeerResponse) Reset() { - *x = DropPeerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropPeerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropPeerResponse) ProtoMessage() {} - -func (x *DropPeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropPeerResponse.ProtoReflect.Descriptor instead. 
-func (*DropPeerResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{9} -} - -func (x *DropPeerResponse) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *DropPeerResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -type ValidatePeerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status ValidatePeerStatus `protobuf:"varint,1,opt,name=status,proto3,enum=peerdb_route.ValidatePeerStatus" json:"status,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *ValidatePeerResponse) Reset() { - *x = ValidatePeerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerResponse) ProtoMessage() {} - -func (x *ValidatePeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerResponse.ProtoReflect.Descriptor instead. -func (*ValidatePeerResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{10} -} - -func (x *ValidatePeerResponse) GetStatus() ValidatePeerStatus { - if x != nil { - return x.Status - } - return ValidatePeerStatus_CREATION_UNKNOWN -} - -func (x *ValidatePeerResponse) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -type CreatePeerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status CreatePeerStatus `protobuf:"varint,1,opt,name=status,proto3,enum=peerdb_route.CreatePeerStatus" json:"status,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *CreatePeerResponse) Reset() { - *x = CreatePeerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreatePeerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreatePeerResponse) ProtoMessage() {} - -func (x *CreatePeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreatePeerResponse.ProtoReflect.Descriptor instead. 
-func (*CreatePeerResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{11} -} - -func (x *CreatePeerResponse) GetStatus() CreatePeerStatus { - if x != nil { - return x.Status - } - return CreatePeerStatus_VALIDATION_UNKNOWN -} - -func (x *CreatePeerResponse) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -type MirrorStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` -} - -func (x *MirrorStatusRequest) Reset() { - *x = MirrorStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MirrorStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MirrorStatusRequest) ProtoMessage() {} - -func (x *MirrorStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MirrorStatusRequest.ProtoReflect.Descriptor instead. -func (*MirrorStatusRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{12} -} - -func (x *MirrorStatusRequest) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -type PartitionStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PartitionId string `protobuf:"bytes,1,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` - StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - NumRows int32 `protobuf:"varint,4,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"` -} - -func (x *PartitionStatus) Reset() { - *x = PartitionStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PartitionStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PartitionStatus) ProtoMessage() {} - -func (x *PartitionStatus) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PartitionStatus.ProtoReflect.Descriptor instead. 
-func (*PartitionStatus) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{13} -} - -func (x *PartitionStatus) GetPartitionId() string { - if x != nil { - return x.PartitionId - } - return "" -} - -func (x *PartitionStatus) GetStartTime() *timestamppb.Timestamp { - if x != nil { - return x.StartTime - } - return nil -} - -func (x *PartitionStatus) GetEndTime() *timestamppb.Timestamp { - if x != nil { - return x.EndTime - } - return nil -} - -func (x *PartitionStatus) GetNumRows() int32 { - if x != nil { - return x.NumRows - } - return 0 -} - -type QRepMirrorStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Config *QRepConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - Partitions []*PartitionStatus `protobuf:"bytes,2,rep,name=partitions,proto3" json:"partitions,omitempty"` -} - -func (x *QRepMirrorStatus) Reset() { - *x = QRepMirrorStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QRepMirrorStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QRepMirrorStatus) ProtoMessage() {} - -func (x *QRepMirrorStatus) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QRepMirrorStatus.ProtoReflect.Descriptor instead. -func (*QRepMirrorStatus) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{14} -} - -func (x *QRepMirrorStatus) GetConfig() *QRepConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *QRepMirrorStatus) GetPartitions() []*PartitionStatus { - if x != nil { - return x.Partitions - } - return nil -} - -type CDCSyncStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - StartLsn int64 `protobuf:"varint,1,opt,name=start_lsn,json=startLsn,proto3" json:"start_lsn,omitempty"` - EndLsn int64 `protobuf:"varint,2,opt,name=end_lsn,json=endLsn,proto3" json:"end_lsn,omitempty"` - NumRows int32 `protobuf:"varint,3,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"` - StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` -} - -func (x *CDCSyncStatus) Reset() { - *x = CDCSyncStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CDCSyncStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CDCSyncStatus) ProtoMessage() {} - -func (x *CDCSyncStatus) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CDCSyncStatus.ProtoReflect.Descriptor instead. 
-func (*CDCSyncStatus) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{15} -} - -func (x *CDCSyncStatus) GetStartLsn() int64 { - if x != nil { - return x.StartLsn - } - return 0 -} - -func (x *CDCSyncStatus) GetEndLsn() int64 { - if x != nil { - return x.EndLsn - } - return 0 -} - -func (x *CDCSyncStatus) GetNumRows() int32 { - if x != nil { - return x.NumRows - } - return 0 -} - -func (x *CDCSyncStatus) GetStartTime() *timestamppb.Timestamp { - if x != nil { - return x.StartTime - } - return nil -} - -func (x *CDCSyncStatus) GetEndTime() *timestamppb.Timestamp { - if x != nil { - return x.EndTime - } - return nil -} - -type PeerSchemasResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schemas []string `protobuf:"bytes,1,rep,name=schemas,proto3" json:"schemas,omitempty"` -} - -func (x *PeerSchemasResponse) Reset() { - *x = PeerSchemasResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerSchemasResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerSchemasResponse) ProtoMessage() {} - -func (x *PeerSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerSchemasResponse.ProtoReflect.Descriptor instead. -func (*PeerSchemasResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{16} -} - -func (x *PeerSchemasResponse) GetSchemas() []string { - if x != nil { - return x.Schemas - } - return nil -} - -type SchemaTablesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerName string `protobuf:"bytes,1,opt,name=peer_name,json=peerName,proto3" json:"peer_name,omitempty"` - SchemaName string `protobuf:"bytes,2,opt,name=schema_name,json=schemaName,proto3" json:"schema_name,omitempty"` -} - -func (x *SchemaTablesRequest) Reset() { - *x = SchemaTablesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaTablesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaTablesRequest) ProtoMessage() {} - -func (x *SchemaTablesRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaTablesRequest.ProtoReflect.Descriptor instead. 
-func (*SchemaTablesRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{17} -} - -func (x *SchemaTablesRequest) GetPeerName() string { - if x != nil { - return x.PeerName - } - return "" -} - -func (x *SchemaTablesRequest) GetSchemaName() string { - if x != nil { - return x.SchemaName - } - return "" -} - -type SchemaTablesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` -} - -func (x *SchemaTablesResponse) Reset() { - *x = SchemaTablesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaTablesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaTablesResponse) ProtoMessage() {} - -func (x *SchemaTablesResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaTablesResponse.ProtoReflect.Descriptor instead. -func (*SchemaTablesResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{18} -} - -func (x *SchemaTablesResponse) GetTables() []string { - if x != nil { - return x.Tables - } - return nil -} - -type AllTablesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` -} - -func (x *AllTablesResponse) Reset() { - *x = AllTablesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AllTablesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AllTablesResponse) ProtoMessage() {} - -func (x *AllTablesResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AllTablesResponse.ProtoReflect.Descriptor instead. 
-func (*AllTablesResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{19} -} - -func (x *AllTablesResponse) GetTables() []string { - if x != nil { - return x.Tables - } - return nil -} - -type TableColumnsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerName string `protobuf:"bytes,1,opt,name=peer_name,json=peerName,proto3" json:"peer_name,omitempty"` - SchemaName string `protobuf:"bytes,2,opt,name=schema_name,json=schemaName,proto3" json:"schema_name,omitempty"` - TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` -} - -func (x *TableColumnsRequest) Reset() { - *x = TableColumnsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableColumnsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableColumnsRequest) ProtoMessage() {} - -func (x *TableColumnsRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableColumnsRequest.ProtoReflect.Descriptor instead. -func (*TableColumnsRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{20} -} - -func (x *TableColumnsRequest) GetPeerName() string { - if x != nil { - return x.PeerName - } - return "" -} - -func (x *TableColumnsRequest) GetSchemaName() string { - if x != nil { - return x.SchemaName - } - return "" -} - -func (x *TableColumnsRequest) GetTableName() string { - if x != nil { - return x.TableName - } - return "" -} - -type TableColumnsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Columns []string `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` -} - -func (x *TableColumnsResponse) Reset() { - *x = TableColumnsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TableColumnsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TableColumnsResponse) ProtoMessage() {} - -func (x *TableColumnsResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TableColumnsResponse.ProtoReflect.Descriptor instead. 
-func (*TableColumnsResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{21} -} - -func (x *TableColumnsResponse) GetColumns() []string { - if x != nil { - return x.Columns - } - return nil -} - -type PostgresPeerActivityInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerName string `protobuf:"bytes,1,opt,name=peer_name,json=peerName,proto3" json:"peer_name,omitempty"` -} - -func (x *PostgresPeerActivityInfoRequest) Reset() { - *x = PostgresPeerActivityInfoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PostgresPeerActivityInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PostgresPeerActivityInfoRequest) ProtoMessage() {} - -func (x *PostgresPeerActivityInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PostgresPeerActivityInfoRequest.ProtoReflect.Descriptor instead. -func (*PostgresPeerActivityInfoRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{22} -} - -func (x *PostgresPeerActivityInfoRequest) GetPeerName() string { - if x != nil { - return x.PeerName - } - return "" -} - -type SlotInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SlotName string `protobuf:"bytes,1,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"` - RedoLSN string `protobuf:"bytes,2,opt,name=redo_lSN,json=redoLSN,proto3" json:"redo_lSN,omitempty"` - RestartLSN string `protobuf:"bytes,3,opt,name=restart_lSN,json=restartLSN,proto3" json:"restart_lSN,omitempty"` - Active bool `protobuf:"varint,4,opt,name=active,proto3" json:"active,omitempty"` - LagInMb float32 `protobuf:"fixed32,5,opt,name=lag_in_mb,json=lagInMb,proto3" json:"lag_in_mb,omitempty"` - ConfirmedFlushLSN string `protobuf:"bytes,6,opt,name=confirmed_flush_lSN,json=confirmedFlushLSN,proto3" json:"confirmed_flush_lSN,omitempty"` - WalStatus string `protobuf:"bytes,7,opt,name=wal_status,json=walStatus,proto3" json:"wal_status,omitempty"` -} - -func (x *SlotInfo) Reset() { - *x = SlotInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SlotInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SlotInfo) ProtoMessage() {} - -func (x *SlotInfo) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SlotInfo.ProtoReflect.Descriptor instead. 
-func (*SlotInfo) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{23} -} - -func (x *SlotInfo) GetSlotName() string { - if x != nil { - return x.SlotName - } - return "" -} - -func (x *SlotInfo) GetRedoLSN() string { - if x != nil { - return x.RedoLSN - } - return "" -} - -func (x *SlotInfo) GetRestartLSN() string { - if x != nil { - return x.RestartLSN - } - return "" -} - -func (x *SlotInfo) GetActive() bool { - if x != nil { - return x.Active - } - return false -} - -func (x *SlotInfo) GetLagInMb() float32 { - if x != nil { - return x.LagInMb - } - return 0 -} - -func (x *SlotInfo) GetConfirmedFlushLSN() string { - if x != nil { - return x.ConfirmedFlushLSN - } - return "" -} - -func (x *SlotInfo) GetWalStatus() string { - if x != nil { - return x.WalStatus - } - return "" -} - -type StatInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Pid int64 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - WaitEvent string `protobuf:"bytes,2,opt,name=wait_event,json=waitEvent,proto3" json:"wait_event,omitempty"` - WaitEventType string `protobuf:"bytes,3,opt,name=wait_event_type,json=waitEventType,proto3" json:"wait_event_type,omitempty"` - QueryStart string `protobuf:"bytes,4,opt,name=query_start,json=queryStart,proto3" json:"query_start,omitempty"` - Query string `protobuf:"bytes,5,opt,name=query,proto3" json:"query,omitempty"` - Duration float32 `protobuf:"fixed32,6,opt,name=duration,proto3" json:"duration,omitempty"` -} - -func (x *StatInfo) Reset() { - *x = StatInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatInfo) ProtoMessage() {} - -func (x *StatInfo) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatInfo.ProtoReflect.Descriptor instead. 
-func (*StatInfo) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{24} -} - -func (x *StatInfo) GetPid() int64 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *StatInfo) GetWaitEvent() string { - if x != nil { - return x.WaitEvent - } - return "" -} - -func (x *StatInfo) GetWaitEventType() string { - if x != nil { - return x.WaitEventType - } - return "" -} - -func (x *StatInfo) GetQueryStart() string { - if x != nil { - return x.QueryStart - } - return "" -} - -func (x *StatInfo) GetQuery() string { - if x != nil { - return x.Query - } - return "" -} - -func (x *StatInfo) GetDuration() float32 { - if x != nil { - return x.Duration - } - return 0 -} - -type PeerSlotResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SlotData []*SlotInfo `protobuf:"bytes,1,rep,name=slot_data,json=slotData,proto3" json:"slot_data,omitempty"` -} - -func (x *PeerSlotResponse) Reset() { - *x = PeerSlotResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerSlotResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerSlotResponse) ProtoMessage() {} - -func (x *PeerSlotResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerSlotResponse.ProtoReflect.Descriptor instead. -func (*PeerSlotResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{25} -} - -func (x *PeerSlotResponse) GetSlotData() []*SlotInfo { - if x != nil { - return x.SlotData - } - return nil -} - -type PeerStatResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - StatData []*StatInfo `protobuf:"bytes,1,rep,name=stat_data,json=statData,proto3" json:"stat_data,omitempty"` -} - -func (x *PeerStatResponse) Reset() { - *x = PeerStatResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerStatResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerStatResponse) ProtoMessage() {} - -func (x *PeerStatResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerStatResponse.ProtoReflect.Descriptor instead. 
-func (*PeerStatResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{26} -} - -func (x *PeerStatResponse) GetStatData() []*StatInfo { - if x != nil { - return x.StatData - } - return nil -} - -type SnapshotStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Clones []*QRepMirrorStatus `protobuf:"bytes,1,rep,name=clones,proto3" json:"clones,omitempty"` -} - -func (x *SnapshotStatus) Reset() { - *x = SnapshotStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SnapshotStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SnapshotStatus) ProtoMessage() {} - -func (x *SnapshotStatus) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SnapshotStatus.ProtoReflect.Descriptor instead. -func (*SnapshotStatus) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{27} -} - -func (x *SnapshotStatus) GetClones() []*QRepMirrorStatus { - if x != nil { - return x.Clones - } - return nil -} - -type CDCMirrorStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Config *FlowConnectionConfigs `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - SnapshotStatus *SnapshotStatus `protobuf:"bytes,2,opt,name=snapshot_status,json=snapshotStatus,proto3" json:"snapshot_status,omitempty"` - CdcSyncs []*CDCSyncStatus `protobuf:"bytes,3,rep,name=cdc_syncs,json=cdcSyncs,proto3" json:"cdc_syncs,omitempty"` -} - -func (x *CDCMirrorStatus) Reset() { - *x = CDCMirrorStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CDCMirrorStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CDCMirrorStatus) ProtoMessage() {} - -func (x *CDCMirrorStatus) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CDCMirrorStatus.ProtoReflect.Descriptor instead. 
-func (*CDCMirrorStatus) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{28} -} - -func (x *CDCMirrorStatus) GetConfig() *FlowConnectionConfigs { - if x != nil { - return x.Config - } - return nil -} - -func (x *CDCMirrorStatus) GetSnapshotStatus() *SnapshotStatus { - if x != nil { - return x.SnapshotStatus - } - return nil -} - -func (x *CDCMirrorStatus) GetCdcSyncs() []*CDCSyncStatus { - if x != nil { - return x.CdcSyncs - } - return nil -} - -type MirrorStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FlowJobName string `protobuf:"bytes,1,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - // Types that are assignable to Status: - // - // *MirrorStatusResponse_QrepStatus - // *MirrorStatusResponse_CdcStatus - Status isMirrorStatusResponse_Status `protobuf_oneof:"status"` - ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *MirrorStatusResponse) Reset() { - *x = MirrorStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MirrorStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MirrorStatusResponse) ProtoMessage() {} - -func (x *MirrorStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MirrorStatusResponse.ProtoReflect.Descriptor instead. 
-func (*MirrorStatusResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{29} -} - -func (x *MirrorStatusResponse) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (m *MirrorStatusResponse) GetStatus() isMirrorStatusResponse_Status { - if m != nil { - return m.Status - } - return nil -} - -func (x *MirrorStatusResponse) GetQrepStatus() *QRepMirrorStatus { - if x, ok := x.GetStatus().(*MirrorStatusResponse_QrepStatus); ok { - return x.QrepStatus - } - return nil -} - -func (x *MirrorStatusResponse) GetCdcStatus() *CDCMirrorStatus { - if x, ok := x.GetStatus().(*MirrorStatusResponse_CdcStatus); ok { - return x.CdcStatus - } - return nil -} - -func (x *MirrorStatusResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -type isMirrorStatusResponse_Status interface { - isMirrorStatusResponse_Status() -} - -type MirrorStatusResponse_QrepStatus struct { - QrepStatus *QRepMirrorStatus `protobuf:"bytes,2,opt,name=qrep_status,json=qrepStatus,proto3,oneof"` -} - -type MirrorStatusResponse_CdcStatus struct { - CdcStatus *CDCMirrorStatus `protobuf:"bytes,3,opt,name=cdc_status,json=cdcStatus,proto3,oneof"` -} - -func (*MirrorStatusResponse_QrepStatus) isMirrorStatusResponse_Status() {} - -func (*MirrorStatusResponse_CdcStatus) isMirrorStatusResponse_Status() {} - -type FlowStateChangeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - FlowJobName string `protobuf:"bytes,2,opt,name=flow_job_name,json=flowJobName,proto3" json:"flow_job_name,omitempty"` - RequestedFlowState FlowState `protobuf:"varint,3,opt,name=requested_flow_state,json=requestedFlowState,proto3,enum=peerdb_route.FlowState" json:"requested_flow_state,omitempty"` -} - -func (x *FlowStateChangeRequest) Reset() { - *x = FlowStateChangeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FlowStateChangeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FlowStateChangeRequest) ProtoMessage() {} - -func (x *FlowStateChangeRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FlowStateChangeRequest.ProtoReflect.Descriptor instead. 
-func (*FlowStateChangeRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{30} -} - -func (x *FlowStateChangeRequest) GetWorkflowId() string { - if x != nil { - return x.WorkflowId - } - return "" -} - -func (x *FlowStateChangeRequest) GetFlowJobName() string { - if x != nil { - return x.FlowJobName - } - return "" -} - -func (x *FlowStateChangeRequest) GetRequestedFlowState() FlowState { - if x != nil { - return x.RequestedFlowState - } - return FlowState_STATE_UNKNOWN -} - -type FlowStateChangeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *FlowStateChangeResponse) Reset() { - *x = FlowStateChangeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FlowStateChangeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FlowStateChangeResponse) ProtoMessage() {} - -func (x *FlowStateChangeResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FlowStateChangeResponse.ProtoReflect.Descriptor instead. -func (*FlowStateChangeResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{31} -} - -func (x *FlowStateChangeResponse) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *FlowStateChangeResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -type PeerDBVersionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *PeerDBVersionRequest) Reset() { - *x = PeerDBVersionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerDBVersionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerDBVersionRequest) ProtoMessage() {} - -func (x *PeerDBVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerDBVersionRequest.ProtoReflect.Descriptor instead. 
-func (*PeerDBVersionRequest) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{32} -} - -type PeerDBVersionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *PeerDBVersionResponse) Reset() { - *x = PeerDBVersionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_route_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerDBVersionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerDBVersionResponse) ProtoMessage() {} - -func (x *PeerDBVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_route_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerDBVersionResponse.ProtoReflect.Descriptor instead. -func (*PeerDBVersionResponse) Descriptor() ([]byte, []int) { - return file_route_proto_rawDescGZIP(), []int{33} -} - -func (x *PeerDBVersionResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -var File_route_proto protoreflect.FileDescriptor - -var file_route_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x70, 0x65, 0x65, 0x72, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0a, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, - 0x43, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x12, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x11, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x30, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, - 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x43, 0x46, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x6f, - 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x77, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x15, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x51, 0x52, 0x65, 
0x70, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0b, 0x71, 0x72, 0x65, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0a, 0x71, 0x72, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, - 0x14, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, - 0x37, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x6f, 0x72, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, - 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0f, 0x53, 0x68, 0x75, - 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, 0x22, 0x0a, - 0x0d, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, - 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x6c, 0x6f, 0x77, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x22, 0x47, 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3d, 0x0a, 0x13, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x26, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, - 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0x3b, 0x0a, 0x11, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x26, - 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0x2e, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x50, 0x65, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x65, 0x65, - 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x65, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x47, 0x0a, 0x10, 0x44, 0x72, 0x6f, 0x70, 0x50, 0x65, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x6a, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x66, 0x0a, 0x12, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x39, 0x0a, 0x13, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc1, - 0x01, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, - 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, - 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x6f, - 0x77, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x10, 0x51, 0x52, 0x65, 0x70, 0x4d, 0x69, 0x72, 0x72, 0x6f, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, - 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd2, 0x01, 0x0a, 0x0d, 0x43, 0x44, 0x43, 0x53, - 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x6c, 0x73, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x4c, 0x73, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6c, 0x73, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4c, 0x73, 0x6e, 0x12, - 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x2f, 0x0a, 0x13, - 0x50, 0x65, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x53, 0x0a, - 0x13, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4e, 0x61, - 0x6d, 0x65, 0x22, 0x2e, 0x0a, 0x14, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x22, 
0x2b, 0x0a, 0x11, 0x41, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, - 0x72, 0x0a, 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x30, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x3e, 0x0a, 0x1f, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, - 0x73, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x65, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe6, 0x01, 0x0a, 0x08, 0x53, 0x6c, 0x6f, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6c, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x6f, 0x5f, 0x6c, 0x53, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x72, 0x65, 0x64, 0x6f, 0x4c, 0x53, 0x4e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6c, 0x53, 0x4e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4c, 0x53, 0x4e, 0x12, 0x16, 0x0a, 0x06, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x12, 0x1a, 0x0a, 0x09, 0x6c, 0x61, 0x67, 0x5f, 0x69, 0x6e, 0x5f, 0x6d, 0x62, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, 0x6c, 0x61, 0x67, 0x49, 0x6e, 0x4d, 0x62, 0x12, - 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x5f, 0x66, 0x6c, 0x75, - 0x73, 0x68, 0x5f, 0x6c, 0x53, 0x4e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x4c, 0x53, 0x4e, 0x12, - 0x1d, 0x0a, 0x0a, 0x77, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb6, - 0x01, 0x0a, 0x08, 0x53, 0x74, 0x61, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x77, 0x61, 0x69, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, - 0x77, 0x61, 0x69, 0x74, 0x5f, 0x65, 0x76, 
0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x47, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x53, - 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x73, - 0x6c, 0x6f, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x53, 0x6c, - 0x6f, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x6c, 0x6f, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x22, 0x47, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x73, 0x74, 0x61, 0x74, 0x44, 0x61, 0x74, 0x61, 0x22, 0x48, 0x0a, 0x0e, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x36, 0x0a, 0x06, 0x63, - 0x6c, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x4d, - 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x63, 0x6c, 0x6f, - 0x6e, 0x65, 0x73, 0x22, 0xce, 0x01, 0x0a, 0x0f, 0x43, 0x44, 0x43, 0x4d, 0x69, 0x72, 0x72, 0x6f, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x73, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x63, 0x64, - 0x63, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x44, 0x43, - 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x63, 0x64, 0x63, 0x53, - 0x79, 0x6e, 0x63, 0x73, 0x22, 0xec, 0x01, 0x0a, 0x14, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, - 0x0d, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 
0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x71, 0x72, 0x65, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x71, 0x72, 0x65, 0x70, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x3e, 0x0a, 0x0a, 0x63, 0x64, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x44, 0x43, 0x4d, 0x69, 0x72, 0x72, 0x6f, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x09, 0x63, 0x64, 0x63, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x16, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, - 0x22, 0x0a, 0x0d, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x17, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x4e, - 0x0a, 0x17, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x16, - 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x44, 0x42, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x15, 0x50, 0x65, 0x65, 0x72, 0x44, 0x42, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x42, 0x0a, 0x12, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x14, 0x0a, 0x10, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 
0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x2a, 0x43, 0x0a, - 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x10, 0x02, 0x2a, 0x43, 0x0a, 0x09, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x4e, - 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, - 0x41, 0x55, 0x53, 0x45, 0x44, 0x10, 0x02, 0x32, 0xee, 0x0d, 0x0a, 0x0b, 0x46, 0x6c, 0x6f, 0x77, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x74, 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x65, 0x65, 0x72, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x6c, 0x0a, - 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x65, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x64, 0x0a, 0x08, 0x44, - 0x72, 0x6f, 0x70, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x50, 0x65, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, - 0x2a, 0x22, 0x0e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2f, 0x64, 0x72, 0x6f, - 0x70, 0x12, 0x79, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x43, 0x46, 0x6c, - 0x6f, 0x77, 0x12, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x43, 0x46, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x43, 0x46, - 0x6c, 0x6f, 0x77, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x19, 0x3a, 0x01, 0x2a, 0x22, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x2f, 0x63, 0x64, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x7d, 0x0a, 0x0e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x23, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x2f, - 0x71, 0x72, 0x65, 0x70, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x79, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x2d, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, - 0x73, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x49, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x65, 0x65, 0x72, 0x73, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0c, - 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x6f, 0x73, 0x74, - 0x67, 0x72, 0x65, 0x73, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x41, 0x6c, 0x6c, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x61, 0x6c, 0x6c, 0x12, 0x6e, 0x0a, 0x0a, 0x47, 0x65, - 0x74, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x21, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, - 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x81, 0x01, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, - 0x65, 0x73, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x6c, 0x6f, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1d, 0x12, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2f, 0x73, 0x6c, 0x6f, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x81, - 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x6f, - 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x12, 0x6a, 0x0a, 0x0c, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x46, 0x6c, - 0x6f, 0x77, 0x12, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x12, 0x60, - 0x0a, 0x0f, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x24, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x7a, 0x0a, 0x0c, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x21, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, - 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6a, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x44, 0x42, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x44, 0x42, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, - 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7c, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x42, 0x0a, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x10, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0xa2, 0x02, 0x03, 0x50, - 0x58, 0x58, 0xaa, 0x02, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0xca, 0x02, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0xe2, 0x02, - 0x17, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x5c, 0x47, 0x50, 0x42, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_route_proto_rawDescOnce sync.Once - file_route_proto_rawDescData = file_route_proto_rawDesc -) - -func file_route_proto_rawDescGZIP() []byte { - file_route_proto_rawDescOnce.Do(func() { - file_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_route_proto_rawDescData) - }) - return file_route_proto_rawDescData -} - -var file_route_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_route_proto_msgTypes = make([]protoimpl.MessageInfo, 34) -var file_route_proto_goTypes = []interface{}{ - (ValidatePeerStatus)(0), // 0: peerdb_route.ValidatePeerStatus - (CreatePeerStatus)(0), // 1: peerdb_route.CreatePeerStatus - (FlowState)(0), // 2: peerdb_route.FlowState - (*CreateCDCFlowRequest)(nil), // 3: peerdb_route.CreateCDCFlowRequest - (*CreateCDCFlowResponse)(nil), // 4: peerdb_route.CreateCDCFlowResponse - (*CreateQRepFlowRequest)(nil), // 5: peerdb_route.CreateQRepFlowRequest - (*CreateQRepFlowResponse)(nil), // 6: peerdb_route.CreateQRepFlowResponse - (*ShutdownRequest)(nil), // 7: peerdb_route.ShutdownRequest - (*ShutdownResponse)(nil), // 8: peerdb_route.ShutdownResponse - (*ValidatePeerRequest)(nil), // 9: peerdb_route.ValidatePeerRequest - (*CreatePeerRequest)(nil), // 10: peerdb_route.CreatePeerRequest - (*DropPeerRequest)(nil), // 11: peerdb_route.DropPeerRequest - (*DropPeerResponse)(nil), // 12: peerdb_route.DropPeerResponse - (*ValidatePeerResponse)(nil), // 13: peerdb_route.ValidatePeerResponse - (*CreatePeerResponse)(nil), // 14: peerdb_route.CreatePeerResponse - (*MirrorStatusRequest)(nil), // 15: 
peerdb_route.MirrorStatusRequest - (*PartitionStatus)(nil), // 16: peerdb_route.PartitionStatus - (*QRepMirrorStatus)(nil), // 17: peerdb_route.QRepMirrorStatus - (*CDCSyncStatus)(nil), // 18: peerdb_route.CDCSyncStatus - (*PeerSchemasResponse)(nil), // 19: peerdb_route.PeerSchemasResponse - (*SchemaTablesRequest)(nil), // 20: peerdb_route.SchemaTablesRequest - (*SchemaTablesResponse)(nil), // 21: peerdb_route.SchemaTablesResponse - (*AllTablesResponse)(nil), // 22: peerdb_route.AllTablesResponse - (*TableColumnsRequest)(nil), // 23: peerdb_route.TableColumnsRequest - (*TableColumnsResponse)(nil), // 24: peerdb_route.TableColumnsResponse - (*PostgresPeerActivityInfoRequest)(nil), // 25: peerdb_route.PostgresPeerActivityInfoRequest - (*SlotInfo)(nil), // 26: peerdb_route.SlotInfo - (*StatInfo)(nil), // 27: peerdb_route.StatInfo - (*PeerSlotResponse)(nil), // 28: peerdb_route.PeerSlotResponse - (*PeerStatResponse)(nil), // 29: peerdb_route.PeerStatResponse - (*SnapshotStatus)(nil), // 30: peerdb_route.SnapshotStatus - (*CDCMirrorStatus)(nil), // 31: peerdb_route.CDCMirrorStatus - (*MirrorStatusResponse)(nil), // 32: peerdb_route.MirrorStatusResponse - (*FlowStateChangeRequest)(nil), // 33: peerdb_route.FlowStateChangeRequest - (*FlowStateChangeResponse)(nil), // 34: peerdb_route.FlowStateChangeResponse - (*PeerDBVersionRequest)(nil), // 35: peerdb_route.PeerDBVersionRequest - (*PeerDBVersionResponse)(nil), // 36: peerdb_route.PeerDBVersionResponse - (*FlowConnectionConfigs)(nil), // 37: peerdb_flow.FlowConnectionConfigs - (*QRepConfig)(nil), // 38: peerdb_flow.QRepConfig - (*Peer)(nil), // 39: peerdb_peers.Peer - (*timestamppb.Timestamp)(nil), // 40: google.protobuf.Timestamp -} -var file_route_proto_depIdxs = []int32{ - 37, // 0: peerdb_route.CreateCDCFlowRequest.connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs - 38, // 1: peerdb_route.CreateQRepFlowRequest.qrep_config:type_name -> peerdb_flow.QRepConfig - 39, // 2: peerdb_route.ShutdownRequest.source_peer:type_name -> peerdb_peers.Peer - 39, // 3: peerdb_route.ShutdownRequest.destination_peer:type_name -> peerdb_peers.Peer - 39, // 4: peerdb_route.ValidatePeerRequest.peer:type_name -> peerdb_peers.Peer - 39, // 5: peerdb_route.CreatePeerRequest.peer:type_name -> peerdb_peers.Peer - 0, // 6: peerdb_route.ValidatePeerResponse.status:type_name -> peerdb_route.ValidatePeerStatus - 1, // 7: peerdb_route.CreatePeerResponse.status:type_name -> peerdb_route.CreatePeerStatus - 40, // 8: peerdb_route.PartitionStatus.start_time:type_name -> google.protobuf.Timestamp - 40, // 9: peerdb_route.PartitionStatus.end_time:type_name -> google.protobuf.Timestamp - 38, // 10: peerdb_route.QRepMirrorStatus.config:type_name -> peerdb_flow.QRepConfig - 16, // 11: peerdb_route.QRepMirrorStatus.partitions:type_name -> peerdb_route.PartitionStatus - 40, // 12: peerdb_route.CDCSyncStatus.start_time:type_name -> google.protobuf.Timestamp - 40, // 13: peerdb_route.CDCSyncStatus.end_time:type_name -> google.protobuf.Timestamp - 26, // 14: peerdb_route.PeerSlotResponse.slot_data:type_name -> peerdb_route.SlotInfo - 27, // 15: peerdb_route.PeerStatResponse.stat_data:type_name -> peerdb_route.StatInfo - 17, // 16: peerdb_route.SnapshotStatus.clones:type_name -> peerdb_route.QRepMirrorStatus - 37, // 17: peerdb_route.CDCMirrorStatus.config:type_name -> peerdb_flow.FlowConnectionConfigs - 30, // 18: peerdb_route.CDCMirrorStatus.snapshot_status:type_name -> peerdb_route.SnapshotStatus - 18, // 19: peerdb_route.CDCMirrorStatus.cdc_syncs:type_name -> 
peerdb_route.CDCSyncStatus - 17, // 20: peerdb_route.MirrorStatusResponse.qrep_status:type_name -> peerdb_route.QRepMirrorStatus - 31, // 21: peerdb_route.MirrorStatusResponse.cdc_status:type_name -> peerdb_route.CDCMirrorStatus - 2, // 22: peerdb_route.FlowStateChangeRequest.requested_flow_state:type_name -> peerdb_route.FlowState - 9, // 23: peerdb_route.FlowService.ValidatePeer:input_type -> peerdb_route.ValidatePeerRequest - 10, // 24: peerdb_route.FlowService.CreatePeer:input_type -> peerdb_route.CreatePeerRequest - 11, // 25: peerdb_route.FlowService.DropPeer:input_type -> peerdb_route.DropPeerRequest - 3, // 26: peerdb_route.FlowService.CreateCDCFlow:input_type -> peerdb_route.CreateCDCFlowRequest - 5, // 27: peerdb_route.FlowService.CreateQRepFlow:input_type -> peerdb_route.CreateQRepFlowRequest - 25, // 28: peerdb_route.FlowService.GetSchemas:input_type -> peerdb_route.PostgresPeerActivityInfoRequest - 20, // 29: peerdb_route.FlowService.GetTablesInSchema:input_type -> peerdb_route.SchemaTablesRequest - 25, // 30: peerdb_route.FlowService.GetAllTables:input_type -> peerdb_route.PostgresPeerActivityInfoRequest - 23, // 31: peerdb_route.FlowService.GetColumns:input_type -> peerdb_route.TableColumnsRequest - 25, // 32: peerdb_route.FlowService.GetSlotInfo:input_type -> peerdb_route.PostgresPeerActivityInfoRequest - 25, // 33: peerdb_route.FlowService.GetStatInfo:input_type -> peerdb_route.PostgresPeerActivityInfoRequest - 7, // 34: peerdb_route.FlowService.ShutdownFlow:input_type -> peerdb_route.ShutdownRequest - 33, // 35: peerdb_route.FlowService.FlowStateChange:input_type -> peerdb_route.FlowStateChangeRequest - 15, // 36: peerdb_route.FlowService.MirrorStatus:input_type -> peerdb_route.MirrorStatusRequest - 35, // 37: peerdb_route.FlowService.GetVersion:input_type -> peerdb_route.PeerDBVersionRequest - 13, // 38: peerdb_route.FlowService.ValidatePeer:output_type -> peerdb_route.ValidatePeerResponse - 14, // 39: peerdb_route.FlowService.CreatePeer:output_type -> peerdb_route.CreatePeerResponse - 12, // 40: peerdb_route.FlowService.DropPeer:output_type -> peerdb_route.DropPeerResponse - 4, // 41: peerdb_route.FlowService.CreateCDCFlow:output_type -> peerdb_route.CreateCDCFlowResponse - 6, // 42: peerdb_route.FlowService.CreateQRepFlow:output_type -> peerdb_route.CreateQRepFlowResponse - 19, // 43: peerdb_route.FlowService.GetSchemas:output_type -> peerdb_route.PeerSchemasResponse - 21, // 44: peerdb_route.FlowService.GetTablesInSchema:output_type -> peerdb_route.SchemaTablesResponse - 22, // 45: peerdb_route.FlowService.GetAllTables:output_type -> peerdb_route.AllTablesResponse - 24, // 46: peerdb_route.FlowService.GetColumns:output_type -> peerdb_route.TableColumnsResponse - 28, // 47: peerdb_route.FlowService.GetSlotInfo:output_type -> peerdb_route.PeerSlotResponse - 29, // 48: peerdb_route.FlowService.GetStatInfo:output_type -> peerdb_route.PeerStatResponse - 8, // 49: peerdb_route.FlowService.ShutdownFlow:output_type -> peerdb_route.ShutdownResponse - 34, // 50: peerdb_route.FlowService.FlowStateChange:output_type -> peerdb_route.FlowStateChangeResponse - 32, // 51: peerdb_route.FlowService.MirrorStatus:output_type -> peerdb_route.MirrorStatusResponse - 36, // 52: peerdb_route.FlowService.GetVersion:output_type -> peerdb_route.PeerDBVersionResponse - 38, // [38:53] is the sub-list for method output_type - 23, // [23:38] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // 
[0:23] is the sub-list for field type_name -} - -func init() { file_route_proto_init() } -func file_route_proto_init() { - if File_route_proto != nil { - return - } - file_peers_proto_init() - file_flow_proto_init() - if !protoimpl.UnsafeEnabled { - file_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateCDCFlowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateCDCFlowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateQRepFlowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateQRepFlowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShutdownRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShutdownResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreatePeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropPeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropPeerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreatePeerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MirrorStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } 
- } - file_route_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PartitionStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QRepMirrorStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CDCSyncStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerSchemasResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaTablesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaTablesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AllTablesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableColumnsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableColumnsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PostgresPeerActivityInfoRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SlotInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerSlotResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerStatResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } 
- } - file_route_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CDCMirrorStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MirrorStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlowStateChangeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlowStateChangeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerDBVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_route_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerDBVersionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_route_proto_msgTypes[29].OneofWrappers = []interface{}{ - (*MirrorStatusResponse_QrepStatus)(nil), - (*MirrorStatusResponse_CdcStatus)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_route_proto_rawDesc, - NumEnums: 3, - NumMessages: 34, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_route_proto_goTypes, - DependencyIndexes: file_route_proto_depIdxs, - EnumInfos: file_route_proto_enumTypes, - MessageInfos: file_route_proto_msgTypes, - }.Build() - File_route_proto = out.File - file_route_proto_rawDesc = nil - file_route_proto_goTypes = nil - file_route_proto_depIdxs = nil -} diff --git a/flow/generated/protos/route.pb.gw.go b/flow/generated/protos/route.pb.gw.go deleted file mode 100644 index 708d0adb4c..0000000000 --- a/flow/generated/protos/route.pb.gw.go +++ /dev/null @@ -1,1322 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: route.proto - -/* -Package protos is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package protos - -import ( - "context" - "io" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - -func request_FlowService_ValidatePeer_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ValidatePeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ValidatePeer(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_ValidatePeer_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ValidatePeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ValidatePeer(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_CreatePeer_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreatePeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.CreatePeer(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_CreatePeer_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreatePeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.CreatePeer(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_DropPeer_0(ctx 
context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DropPeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DropPeer(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_DropPeer_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DropPeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DropPeer(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_CreateCDCFlow_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreateCDCFlowRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.CreateCDCFlow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_CreateCDCFlow_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreateCDCFlowRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.CreateCDCFlow(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_CreateQRepFlow_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreateQRepFlowRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.CreateQRepFlow(ctx, 
&protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_CreateQRepFlow_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CreateQRepFlowRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.CreateQRepFlow(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_FlowService_GetSchemas_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_FlowService_GetSchemas_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetSchemas_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetSchemas(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetSchemas_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetSchemas_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetSchemas(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_FlowService_GetTablesInSchema_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_FlowService_GetTablesInSchema_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SchemaTablesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetTablesInSchema_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetTablesInSchema(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetTablesInSchema_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) 
{ - var protoReq SchemaTablesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetTablesInSchema_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetTablesInSchema(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_FlowService_GetAllTables_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_FlowService_GetAllTables_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetAllTables_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetAllTables(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetAllTables_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetAllTables_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetAllTables(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_FlowService_GetColumns_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_FlowService_GetColumns_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TableColumnsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetColumns_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetColumns(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetColumns_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TableColumnsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_FlowService_GetColumns_0); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetColumns(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_GetSlotInfo_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["peer_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "peer_name") - } - - protoReq.PeerName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "peer_name", err) - } - - msg, err := client.GetSlotInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetSlotInfo_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["peer_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "peer_name") - } - - protoReq.PeerName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "peer_name", err) - } - - msg, err := server.GetSlotInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_GetStatInfo_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["peer_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "peer_name") - } - - protoReq.PeerName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "peer_name", err) - } - - msg, err := client.GetStatInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetStatInfo_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PostgresPeerActivityInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["peer_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "peer_name") - } - - protoReq.PeerName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "peer_name", err) - } - - msg, err := server.GetStatInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_ShutdownFlow_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, 
req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ShutdownRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ShutdownFlow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_ShutdownFlow_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ShutdownRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ShutdownFlow(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_MirrorStatus_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MirrorStatusRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["flow_job_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "flow_job_name") - } - - protoReq.FlowJobName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "flow_job_name", err) - } - - msg, err := client.MirrorStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_MirrorStatus_0(ctx context.Context, marshaler runtime.Marshaler, server FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MirrorStatusRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["flow_job_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "flow_job_name") - } - - protoReq.FlowJobName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "flow_job_name", err) - } - - msg, err := server.MirrorStatus(ctx, &protoReq) - return msg, metadata, err - -} - -func request_FlowService_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, client FlowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PeerDBVersionRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_FlowService_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, server 
FlowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PeerDBVersionRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetVersion(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterFlowServiceHandlerServer registers the http handlers for service FlowService to "mux". -// UnaryRPC :call FlowServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterFlowServiceHandlerFromEndpoint instead. -func RegisterFlowServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server FlowServiceServer) error { - - mux.Handle("POST", pattern_FlowService_ValidatePeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/ValidatePeer", runtime.WithHTTPPathPattern("/v1/peers/validate")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_ValidatePeer_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_ValidatePeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_CreatePeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/CreatePeer", runtime.WithHTTPPathPattern("/v1/peers/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_CreatePeer_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreatePeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_FlowService_DropPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/DropPeer", runtime.WithHTTPPathPattern("/v1/peers/drop")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_DropPeer_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_DropPeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_CreateCDCFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/CreateCDCFlow", runtime.WithHTTPPathPattern("/v1/flows/cdc/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_CreateCDCFlow_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreateCDCFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_FlowService_CreateQRepFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/CreateQRepFlow", runtime.WithHTTPPathPattern("/v1/flows/qrep/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_CreateQRepFlow_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreateQRepFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetSchemas_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetSchemas", runtime.WithHTTPPathPattern("/v1/peers/schemas")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetSchemas_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetSchemas_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_GetTablesInSchema_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetTablesInSchema", runtime.WithHTTPPathPattern("/v1/peers/tables")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetTablesInSchema_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetTablesInSchema_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetAllTables_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetAllTables", runtime.WithHTTPPathPattern("/v1/peers/tables/all")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetAllTables_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetAllTables_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_GetColumns_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetColumns", runtime.WithHTTPPathPattern("/v1/peers/columns")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetColumns_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetColumns_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetSlotInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetSlotInfo", runtime.WithHTTPPathPattern("/v1/peers/slots/{peer_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetSlotInfo_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetSlotInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_GetStatInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetStatInfo", runtime.WithHTTPPathPattern("/v1/peers/stats/{peer_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetStatInfo_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetStatInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_ShutdownFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/ShutdownFlow", runtime.WithHTTPPathPattern("/v1/mirrors/drop")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_ShutdownFlow_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_ShutdownFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_MirrorStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/MirrorStatus", runtime.WithHTTPPathPattern("/v1/mirrors/{flow_job_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_MirrorStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_MirrorStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/peerdb_route.FlowService/GetVersion", runtime.WithHTTPPathPattern("/v1/version")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_FlowService_GetVersion_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterFlowServiceHandlerFromEndpoint is same as RegisterFlowServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterFlowServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterFlowServiceHandler(ctx, mux, conn) -} - -// RegisterFlowServiceHandler registers the http handlers for service FlowService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterFlowServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterFlowServiceHandlerClient(ctx, mux, NewFlowServiceClient(conn)) -} - -// RegisterFlowServiceHandlerClient registers the http handlers for service FlowService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "FlowServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "FlowServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "FlowServiceClient" to call the correct interceptors. -func RegisterFlowServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client FlowServiceClient) error { - - mux.Handle("POST", pattern_FlowService_ValidatePeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/ValidatePeer", runtime.WithHTTPPathPattern("/v1/peers/validate")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_ValidatePeer_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_ValidatePeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_CreatePeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/CreatePeer", runtime.WithHTTPPathPattern("/v1/peers/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_CreatePeer_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreatePeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_FlowService_DropPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/DropPeer", runtime.WithHTTPPathPattern("/v1/peers/drop")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_DropPeer_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_DropPeer_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_CreateCDCFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/CreateCDCFlow", runtime.WithHTTPPathPattern("/v1/flows/cdc/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_CreateCDCFlow_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreateCDCFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_FlowService_CreateQRepFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/CreateQRepFlow", runtime.WithHTTPPathPattern("/v1/flows/qrep/create")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_CreateQRepFlow_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_CreateQRepFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_GetSchemas_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetSchemas", runtime.WithHTTPPathPattern("/v1/peers/schemas")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetSchemas_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetSchemas_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetTablesInSchema_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetTablesInSchema", runtime.WithHTTPPathPattern("/v1/peers/tables")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetTablesInSchema_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetTablesInSchema_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetAllTables_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetAllTables", runtime.WithHTTPPathPattern("/v1/peers/tables/all")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetAllTables_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetAllTables_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_FlowService_GetColumns_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetColumns", runtime.WithHTTPPathPattern("/v1/peers/columns")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetColumns_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetColumns_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetSlotInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetSlotInfo", runtime.WithHTTPPathPattern("/v1/peers/slots/{peer_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetSlotInfo_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetSlotInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetStatInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetStatInfo", runtime.WithHTTPPathPattern("/v1/peers/stats/{peer_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetStatInfo_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetStatInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_FlowService_ShutdownFlow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/ShutdownFlow", runtime.WithHTTPPathPattern("/v1/mirrors/drop")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_ShutdownFlow_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_ShutdownFlow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_MirrorStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/MirrorStatus", runtime.WithHTTPPathPattern("/v1/mirrors/{flow_job_name}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_MirrorStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_MirrorStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_FlowService_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/peerdb_route.FlowService/GetVersion", runtime.WithHTTPPathPattern("/v1/version")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_FlowService_GetVersion_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_FlowService_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
-
-	})
-
-	return nil
-}
-
-var (
-	pattern_FlowService_ValidatePeer_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "validate"}, ""))
-
-	pattern_FlowService_CreatePeer_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "create"}, ""))
-
-	pattern_FlowService_DropPeer_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "drop"}, ""))
-
-	pattern_FlowService_CreateCDCFlow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "flows", "cdc", "create"}, ""))
-
-	pattern_FlowService_CreateQRepFlow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "flows", "qrep", "create"}, ""))
-
-	pattern_FlowService_GetSchemas_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "schemas"}, ""))
-
-	pattern_FlowService_GetTablesInSchema_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "tables"}, ""))
-
-	pattern_FlowService_GetAllTables_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "peers", "tables", "all"}, ""))
-
-	pattern_FlowService_GetColumns_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "columns"}, ""))
-
-	pattern_FlowService_GetSlotInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "peers", "slots", "peer_name"}, ""))
-
-	pattern_FlowService_GetStatInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "peers", "stats", "peer_name"}, ""))
-
-	pattern_FlowService_ShutdownFlow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "mirrors", "drop"}, ""))
-
-	pattern_FlowService_MirrorStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "mirrors", "flow_job_name"}, ""))
-
-	pattern_FlowService_GetVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "version"}, ""))
-)
-
-var (
-	forward_FlowService_ValidatePeer_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_CreatePeer_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_DropPeer_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_CreateCDCFlow_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_CreateQRepFlow_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetSchemas_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetTablesInSchema_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetAllTables_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetColumns_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetSlotInfo_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetStatInfo_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_ShutdownFlow_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_MirrorStatus_0 = runtime.ForwardResponseMessage
-
-	forward_FlowService_GetVersion_0 = runtime.ForwardResponseMessage
-)
diff --git a/flow/generated/protos/route_grpc.pb.go b/flow/generated/protos/route_grpc.pb.go
deleted file mode 100644
index 6d36c84fee..0000000000
--- a/flow/generated/protos/route_grpc.pb.go
+++ /dev/null
@@ -1,627 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc (unknown)
-// source: route.proto
-
-package protos
-
-import (
-	context "context"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
-	FlowService_ValidatePeer_FullMethodName = "/peerdb_route.FlowService/ValidatePeer"
-	FlowService_CreatePeer_FullMethodName = "/peerdb_route.FlowService/CreatePeer"
-	FlowService_DropPeer_FullMethodName = "/peerdb_route.FlowService/DropPeer"
-	FlowService_CreateCDCFlow_FullMethodName = "/peerdb_route.FlowService/CreateCDCFlow"
-	FlowService_CreateQRepFlow_FullMethodName = "/peerdb_route.FlowService/CreateQRepFlow"
-	FlowService_GetSchemas_FullMethodName = "/peerdb_route.FlowService/GetSchemas"
-	FlowService_GetTablesInSchema_FullMethodName = "/peerdb_route.FlowService/GetTablesInSchema"
-	FlowService_GetAllTables_FullMethodName = "/peerdb_route.FlowService/GetAllTables"
-	FlowService_GetColumns_FullMethodName = "/peerdb_route.FlowService/GetColumns"
-	FlowService_GetSlotInfo_FullMethodName = "/peerdb_route.FlowService/GetSlotInfo"
-	FlowService_GetStatInfo_FullMethodName = "/peerdb_route.FlowService/GetStatInfo"
-	FlowService_ShutdownFlow_FullMethodName = "/peerdb_route.FlowService/ShutdownFlow"
-	FlowService_FlowStateChange_FullMethodName = "/peerdb_route.FlowService/FlowStateChange"
-	FlowService_MirrorStatus_FullMethodName = "/peerdb_route.FlowService/MirrorStatus"
-	FlowService_GetVersion_FullMethodName = "/peerdb_route.FlowService/GetVersion"
-)
-
-// FlowServiceClient is the client API for FlowService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type FlowServiceClient interface { - ValidatePeer(ctx context.Context, in *ValidatePeerRequest, opts ...grpc.CallOption) (*ValidatePeerResponse, error) - CreatePeer(ctx context.Context, in *CreatePeerRequest, opts ...grpc.CallOption) (*CreatePeerResponse, error) - DropPeer(ctx context.Context, in *DropPeerRequest, opts ...grpc.CallOption) (*DropPeerResponse, error) - CreateCDCFlow(ctx context.Context, in *CreateCDCFlowRequest, opts ...grpc.CallOption) (*CreateCDCFlowResponse, error) - CreateQRepFlow(ctx context.Context, in *CreateQRepFlowRequest, opts ...grpc.CallOption) (*CreateQRepFlowResponse, error) - GetSchemas(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerSchemasResponse, error) - GetTablesInSchema(ctx context.Context, in *SchemaTablesRequest, opts ...grpc.CallOption) (*SchemaTablesResponse, error) - GetAllTables(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*AllTablesResponse, error) - GetColumns(ctx context.Context, in *TableColumnsRequest, opts ...grpc.CallOption) (*TableColumnsResponse, error) - GetSlotInfo(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerSlotResponse, error) - GetStatInfo(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerStatResponse, error) - ShutdownFlow(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) - FlowStateChange(ctx context.Context, in *FlowStateChangeRequest, opts ...grpc.CallOption) (*FlowStateChangeResponse, error) - MirrorStatus(ctx context.Context, in *MirrorStatusRequest, opts ...grpc.CallOption) (*MirrorStatusResponse, error) - GetVersion(ctx context.Context, in *PeerDBVersionRequest, opts ...grpc.CallOption) (*PeerDBVersionResponse, error) -} - -type flowServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewFlowServiceClient(cc grpc.ClientConnInterface) FlowServiceClient { - return &flowServiceClient{cc} -} - -func (c *flowServiceClient) ValidatePeer(ctx context.Context, in *ValidatePeerRequest, opts ...grpc.CallOption) (*ValidatePeerResponse, error) { - out := new(ValidatePeerResponse) - err := c.cc.Invoke(ctx, FlowService_ValidatePeer_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) CreatePeer(ctx context.Context, in *CreatePeerRequest, opts ...grpc.CallOption) (*CreatePeerResponse, error) { - out := new(CreatePeerResponse) - err := c.cc.Invoke(ctx, FlowService_CreatePeer_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) DropPeer(ctx context.Context, in *DropPeerRequest, opts ...grpc.CallOption) (*DropPeerResponse, error) { - out := new(DropPeerResponse) - err := c.cc.Invoke(ctx, FlowService_DropPeer_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) CreateCDCFlow(ctx context.Context, in *CreateCDCFlowRequest, opts ...grpc.CallOption) (*CreateCDCFlowResponse, error) { - out := new(CreateCDCFlowResponse) - err := c.cc.Invoke(ctx, FlowService_CreateCDCFlow_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) CreateQRepFlow(ctx context.Context, in *CreateQRepFlowRequest, opts ...grpc.CallOption) (*CreateQRepFlowResponse, error) { - out := new(CreateQRepFlowResponse) - err := c.cc.Invoke(ctx, FlowService_CreateQRepFlow_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetSchemas(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerSchemasResponse, error) { - out := new(PeerSchemasResponse) - err := c.cc.Invoke(ctx, FlowService_GetSchemas_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetTablesInSchema(ctx context.Context, in *SchemaTablesRequest, opts ...grpc.CallOption) (*SchemaTablesResponse, error) { - out := new(SchemaTablesResponse) - err := c.cc.Invoke(ctx, FlowService_GetTablesInSchema_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetAllTables(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*AllTablesResponse, error) { - out := new(AllTablesResponse) - err := c.cc.Invoke(ctx, FlowService_GetAllTables_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetColumns(ctx context.Context, in *TableColumnsRequest, opts ...grpc.CallOption) (*TableColumnsResponse, error) { - out := new(TableColumnsResponse) - err := c.cc.Invoke(ctx, FlowService_GetColumns_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetSlotInfo(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerSlotResponse, error) { - out := new(PeerSlotResponse) - err := c.cc.Invoke(ctx, FlowService_GetSlotInfo_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetStatInfo(ctx context.Context, in *PostgresPeerActivityInfoRequest, opts ...grpc.CallOption) (*PeerStatResponse, error) { - out := new(PeerStatResponse) - err := c.cc.Invoke(ctx, FlowService_GetStatInfo_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) ShutdownFlow(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) { - out := new(ShutdownResponse) - err := c.cc.Invoke(ctx, FlowService_ShutdownFlow_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) FlowStateChange(ctx context.Context, in *FlowStateChangeRequest, opts ...grpc.CallOption) (*FlowStateChangeResponse, error) { - out := new(FlowStateChangeResponse) - err := c.cc.Invoke(ctx, FlowService_FlowStateChange_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) MirrorStatus(ctx context.Context, in *MirrorStatusRequest, opts ...grpc.CallOption) (*MirrorStatusResponse, error) { - out := new(MirrorStatusResponse) - err := c.cc.Invoke(ctx, FlowService_MirrorStatus_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *flowServiceClient) GetVersion(ctx context.Context, in *PeerDBVersionRequest, opts ...grpc.CallOption) (*PeerDBVersionResponse, error) { - out := new(PeerDBVersionResponse) - err := c.cc.Invoke(ctx, FlowService_GetVersion_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// FlowServiceServer is the server API for FlowService service. -// All implementations must embed UnimplementedFlowServiceServer -// for forward compatibility -type FlowServiceServer interface { - ValidatePeer(context.Context, *ValidatePeerRequest) (*ValidatePeerResponse, error) - CreatePeer(context.Context, *CreatePeerRequest) (*CreatePeerResponse, error) - DropPeer(context.Context, *DropPeerRequest) (*DropPeerResponse, error) - CreateCDCFlow(context.Context, *CreateCDCFlowRequest) (*CreateCDCFlowResponse, error) - CreateQRepFlow(context.Context, *CreateQRepFlowRequest) (*CreateQRepFlowResponse, error) - GetSchemas(context.Context, *PostgresPeerActivityInfoRequest) (*PeerSchemasResponse, error) - GetTablesInSchema(context.Context, *SchemaTablesRequest) (*SchemaTablesResponse, error) - GetAllTables(context.Context, *PostgresPeerActivityInfoRequest) (*AllTablesResponse, error) - GetColumns(context.Context, *TableColumnsRequest) (*TableColumnsResponse, error) - GetSlotInfo(context.Context, *PostgresPeerActivityInfoRequest) (*PeerSlotResponse, error) - GetStatInfo(context.Context, *PostgresPeerActivityInfoRequest) (*PeerStatResponse, error) - ShutdownFlow(context.Context, *ShutdownRequest) (*ShutdownResponse, error) - FlowStateChange(context.Context, *FlowStateChangeRequest) (*FlowStateChangeResponse, error) - MirrorStatus(context.Context, *MirrorStatusRequest) (*MirrorStatusResponse, error) - GetVersion(context.Context, *PeerDBVersionRequest) (*PeerDBVersionResponse, error) - mustEmbedUnimplementedFlowServiceServer() -} - -// UnimplementedFlowServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedFlowServiceServer struct { -} - -func (UnimplementedFlowServiceServer) ValidatePeer(context.Context, *ValidatePeerRequest) (*ValidatePeerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidatePeer not implemented") -} -func (UnimplementedFlowServiceServer) CreatePeer(context.Context, *CreatePeerRequest) (*CreatePeerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreatePeer not implemented") -} -func (UnimplementedFlowServiceServer) DropPeer(context.Context, *DropPeerRequest) (*DropPeerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DropPeer not implemented") -} -func (UnimplementedFlowServiceServer) CreateCDCFlow(context.Context, *CreateCDCFlowRequest) (*CreateCDCFlowResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCDCFlow not implemented") -} -func (UnimplementedFlowServiceServer) CreateQRepFlow(context.Context, *CreateQRepFlowRequest) (*CreateQRepFlowResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateQRepFlow not implemented") -} -func (UnimplementedFlowServiceServer) GetSchemas(context.Context, *PostgresPeerActivityInfoRequest) (*PeerSchemasResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchemas not implemented") -} -func (UnimplementedFlowServiceServer) GetTablesInSchema(context.Context, *SchemaTablesRequest) (*SchemaTablesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTablesInSchema not implemented") -} -func (UnimplementedFlowServiceServer) GetAllTables(context.Context, *PostgresPeerActivityInfoRequest) (*AllTablesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllTables not implemented") -} -func (UnimplementedFlowServiceServer) GetColumns(context.Context, *TableColumnsRequest) (*TableColumnsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetColumns not implemented") -} -func (UnimplementedFlowServiceServer) GetSlotInfo(context.Context, *PostgresPeerActivityInfoRequest) (*PeerSlotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSlotInfo not implemented") -} -func (UnimplementedFlowServiceServer) GetStatInfo(context.Context, *PostgresPeerActivityInfoRequest) (*PeerStatResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStatInfo not implemented") -} -func (UnimplementedFlowServiceServer) ShutdownFlow(context.Context, *ShutdownRequest) (*ShutdownResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ShutdownFlow not implemented") -} -func (UnimplementedFlowServiceServer) FlowStateChange(context.Context, *FlowStateChangeRequest) (*FlowStateChangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FlowStateChange not implemented") -} -func (UnimplementedFlowServiceServer) MirrorStatus(context.Context, *MirrorStatusRequest) (*MirrorStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MirrorStatus not implemented") -} -func (UnimplementedFlowServiceServer) GetVersion(context.Context, *PeerDBVersionRequest) (*PeerDBVersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") -} -func (UnimplementedFlowServiceServer) mustEmbedUnimplementedFlowServiceServer() {} - -// UnsafeFlowServiceServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to FlowServiceServer will -// result in compilation errors. -type UnsafeFlowServiceServer interface { - mustEmbedUnimplementedFlowServiceServer() -} - -func RegisterFlowServiceServer(s grpc.ServiceRegistrar, srv FlowServiceServer) { - s.RegisterService(&FlowService_ServiceDesc, srv) -} - -func _FlowService_ValidatePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidatePeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).ValidatePeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_ValidatePeer_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).ValidatePeer(ctx, req.(*ValidatePeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_CreatePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreatePeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).CreatePeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_CreatePeer_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).CreatePeer(ctx, req.(*CreatePeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_DropPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DropPeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).DropPeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_DropPeer_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).DropPeer(ctx, req.(*DropPeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_CreateCDCFlow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateCDCFlowRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).CreateCDCFlow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_CreateCDCFlow_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).CreateCDCFlow(ctx, req.(*CreateCDCFlowRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_CreateQRepFlow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateQRepFlowRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).CreateQRepFlow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_CreateQRepFlow_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(FlowServiceServer).CreateQRepFlow(ctx, req.(*CreateQRepFlowRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetSchemas_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PostgresPeerActivityInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetSchemas(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetSchemas_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetSchemas(ctx, req.(*PostgresPeerActivityInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetTablesInSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SchemaTablesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetTablesInSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetTablesInSchema_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetTablesInSchema(ctx, req.(*SchemaTablesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetAllTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PostgresPeerActivityInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetAllTables(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetAllTables_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetAllTables(ctx, req.(*PostgresPeerActivityInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetColumns_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TableColumnsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetColumns(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetColumns_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetColumns(ctx, req.(*TableColumnsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetSlotInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PostgresPeerActivityInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetSlotInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetSlotInfo_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetSlotInfo(ctx, req.(*PostgresPeerActivityInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_FlowService_GetStatInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PostgresPeerActivityInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetStatInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetStatInfo_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetStatInfo(ctx, req.(*PostgresPeerActivityInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_ShutdownFlow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ShutdownRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).ShutdownFlow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_ShutdownFlow_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).ShutdownFlow(ctx, req.(*ShutdownRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_FlowStateChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FlowStateChangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).FlowStateChange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_FlowStateChange_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).FlowStateChange(ctx, req.(*FlowStateChangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_MirrorStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MirrorStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).MirrorStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_MirrorStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).MirrorStatus(ctx, req.(*MirrorStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _FlowService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PeerDBVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FlowServiceServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: FlowService_GetVersion_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FlowServiceServer).GetVersion(ctx, req.(*PeerDBVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// FlowService_ServiceDesc is the grpc.ServiceDesc for FlowService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var FlowService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "peerdb_route.FlowService", - HandlerType: (*FlowServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ValidatePeer", - Handler: _FlowService_ValidatePeer_Handler, - }, - { - MethodName: "CreatePeer", - Handler: _FlowService_CreatePeer_Handler, - }, - { - MethodName: "DropPeer", - Handler: _FlowService_DropPeer_Handler, - }, - { - MethodName: "CreateCDCFlow", - Handler: _FlowService_CreateCDCFlow_Handler, - }, - { - MethodName: "CreateQRepFlow", - Handler: _FlowService_CreateQRepFlow_Handler, - }, - { - MethodName: "GetSchemas", - Handler: _FlowService_GetSchemas_Handler, - }, - { - MethodName: "GetTablesInSchema", - Handler: _FlowService_GetTablesInSchema_Handler, - }, - { - MethodName: "GetAllTables", - Handler: _FlowService_GetAllTables_Handler, - }, - { - MethodName: "GetColumns", - Handler: _FlowService_GetColumns_Handler, - }, - { - MethodName: "GetSlotInfo", - Handler: _FlowService_GetSlotInfo_Handler, - }, - { - MethodName: "GetStatInfo", - Handler: _FlowService_GetStatInfo_Handler, - }, - { - MethodName: "ShutdownFlow", - Handler: _FlowService_ShutdownFlow_Handler, - }, - { - MethodName: "FlowStateChange", - Handler: _FlowService_FlowStateChange_Handler, - }, - { - MethodName: "MirrorStatus", - Handler: _FlowService_MirrorStatus_Handler, - }, - { - MethodName: "GetVersion", - Handler: _FlowService_GetVersion_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "route.proto", -} diff --git a/nexus/pt/src/google.api.rs b/nexus/pt/src/google.api.rs deleted file mode 100644 index c758f27ecc..0000000000 --- a/nexus/pt/src/google.api.rs +++ /dev/null @@ -1,374 +0,0 @@ -// @generated -/// Defines the HTTP configuration for an API service. It contains a list of -/// \[HttpRule][google.api.HttpRule\], each specifying the mapping of an RPC method -/// to one or more HTTP REST API methods. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Http { - /// A list of HTTP configuration rules that apply to individual API methods. - /// - /// **NOTE:** All service configuration rules follow "last one wins" order. - #[prost(message, repeated, tag="1")] - pub rules: ::prost::alloc::vec::Vec, - /// When set to true, URL path parameters will be fully URI-decoded except in - /// cases of single segment matches in reserved expansion, where "%2F" will be - /// left encoded. - /// - /// The default behavior is to not decode RFC 6570 reserved characters in multi - /// segment matches. - #[prost(bool, tag="2")] - pub fully_decode_reserved_expansion: bool, -} -/// # gRPC Transcoding -/// -/// gRPC Transcoding is a feature for mapping between a gRPC method and one or -/// more HTTP REST endpoints. It allows developers to build a single API service -/// that supports both gRPC APIs and REST APIs. Many systems, including [Google -/// APIs](), -/// [Cloud Endpoints](), [gRPC -/// Gateway](), -/// and \[Envoy\]() proxy support this feature -/// and use it for large scale production services. -/// -/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -/// how different portions of the gRPC request message are mapped to the URL -/// path, URL query parameters, and HTTP request body. It also controls how the -/// gRPC response message is mapped to the HTTP response body. 
`HttpRule` is -/// typically specified as an `google.api.http` annotation on the gRPC method. -/// -/// Each mapping specifies a URL path template and an HTTP method. The path -/// template may refer to one or more fields in the gRPC request message, as long -/// as each field is a non-repeated field with a primitive (non-message) type. -/// The path template controls how fields of the request message are mapped to -/// the URL path. -/// -/// Example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get: "/v1/{name=messages/*}" -/// }; -/// } -/// } -/// message GetMessageRequest { -/// string name = 1; // Mapped to URL path. -/// } -/// message Message { -/// string text = 1; // The resource content. -/// } -/// -/// This enables an HTTP REST to gRPC mapping as below: -/// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -/// -/// Any fields in the request message which are not bound by the path template -/// automatically become HTTP query parameters if there is no HTTP request body. -/// For example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get:"/v1/messages/{message_id}" -/// }; -/// } -/// } -/// message GetMessageRequest { -/// message SubMessage { -/// string subfield = 1; -/// } -/// string message_id = 1; // Mapped to URL path. -/// int64 revision = 2; // Mapped to URL query parameter `revision`. -/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -/// } -/// -/// This enables a HTTP JSON to RPC mapping as below: -/// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -/// "foo"))` -/// -/// Note that fields which are mapped to URL query parameters must have a -/// primitive type or a repeated primitive type or a non-repeated message type. -/// In the case of a repeated type, the parameter can be repeated in the URL -/// as `...?param=A¶m=B`. In the case of a message type, each field of the -/// message is mapped to a separate parameter, such as -/// `...?foo.a=A&foo.b=B&foo.c=C`. -/// -/// For HTTP methods that allow a request body, the `body` field -/// specifies the mapping. Consider a REST update method on the -/// message resource collection: -/// -/// service Messaging { -/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "message" -/// }; -/// } -/// } -/// message UpdateMessageRequest { -/// string message_id = 1; // mapped to the URL -/// Message message = 2; // mapped to the body -/// } -/// -/// The following HTTP JSON to RPC mapping is enabled, where the -/// representation of the JSON in the request body is determined by -/// protos JSON encoding: -/// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" message { text: "Hi!" })` -/// -/// The special name `*` can be used in the body mapping to define that -/// every field not bound by the path template should be mapped to the -/// request body. 
This enables the following alternative definition of -/// the update method: -/// -/// service Messaging { -/// rpc UpdateMessage(Message) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "*" -/// }; -/// } -/// } -/// message Message { -/// string message_id = 1; -/// string text = 2; -/// } -/// -/// -/// The following HTTP JSON to RPC mapping is enabled: -/// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" text: "Hi!")` -/// -/// Note that when using `*` in the body mapping, it is not possible to -/// have HTTP parameters, as all fields not bound by the path end in -/// the body. This makes this option more rarely used in practice when -/// defining REST APIs. The common usage of `*` is in custom methods -/// which don't use the URL at all for transferring data. -/// -/// It is possible to define multiple HTTP methods for one RPC by using -/// the `additional_bindings` option. Example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get: "/v1/messages/{message_id}" -/// additional_bindings { -/// get: "/v1/users/{user_id}/messages/{message_id}" -/// } -/// }; -/// } -/// } -/// message GetMessageRequest { -/// string message_id = 1; -/// string user_id = 2; -/// } -/// -/// This enables the following two alternative HTTP JSON to RPC mappings: -/// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -/// "123456")` -/// -/// ## Rules for HTTP mapping -/// -/// 1. Leaf request fields (recursive expansion nested messages in the request -/// message) are classified into three categories: -/// - Fields referred by the path template. They are passed via the URL path. -/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They -/// are passed via the HTTP -/// request body. -/// - All other fields are passed via the URL query parameters, and the -/// parameter name is the field path in the request message. A repeated -/// field can be represented as multiple query parameters under the same -/// name. -/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL -/// query parameter, all fields -/// are passed via URL path and HTTP request body. -/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP -/// request body, all -/// fields are passed via URL path and URL query parameters. -/// -/// ### Path template syntax -/// -/// Template = "/" Segments [ Verb ] ; -/// Segments = Segment { "/" Segment } ; -/// Segment = "*" | "**" | LITERAL | Variable ; -/// Variable = "{" FieldPath [ "=" Segments ] "}" ; -/// FieldPath = IDENT { "." IDENT } ; -/// Verb = ":" LITERAL ; -/// -/// The syntax `*` matches a single URL path segment. The syntax `**` matches -/// zero or more URL path segments, which must be the last part of the URL path -/// except the `Verb`. -/// -/// The syntax `Variable` matches part of the URL path as specified by its -/// template. A variable template must not contain other variables. If a variable -/// matches a single path segment, its template may be omitted, e.g. `{var}` -/// is equivalent to `{var=*}`. -/// -/// The syntax `LITERAL` matches literal text in the URL path. 
If the `LITERAL` -/// contains any reserved character, such characters should be percent-encoded -/// before the matching. -/// -/// If a variable contains exactly one path segment, such as `"{var}"` or -/// `"{var=*}"`, when such a variable is expanded into a URL path on the client -/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The -/// server side does the reverse decoding. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{var}`. -/// -/// If a variable contains multiple path segments, such as `"{var=foo/*}"` -/// or `"{var=**}"`, when such a variable is expanded into a URL path on the -/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. -/// The server side does the reverse decoding, except "%2F" and "%2f" are left -/// unchanged. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{+var}`. -/// -/// ## Using gRPC API Service Configuration -/// -/// gRPC API Service Configuration (service config) is a configuration language -/// for configuring a gRPC service to become a user-facing product. The -/// service config is simply the YAML representation of the `google.api.Service` -/// proto message. -/// -/// As an alternative to annotating your proto file, you can configure gRPC -/// transcoding in your service config YAML files. You do this by specifying a -/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -/// effect as the proto annotation. This can be particularly useful if you -/// have a proto that is reused in multiple services. Note that any transcoding -/// specified in the service config will override any matching transcoding -/// configuration in the proto. -/// -/// Example: -/// -/// http: -/// rules: -/// # Selects a gRPC method and applies HttpRule to it. -/// - selector: example.v1.Messaging.GetMessage -/// get: /v1/messages/{message_id}/{sub.subfield} -/// -/// ## Special notes -/// -/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -/// proto to JSON conversion must follow the [proto3 -/// specification](). -/// -/// While the single segment variable follows the semantics of -/// [RFC 6570]() Section 3.2.2 Simple String -/// Expansion, the multi segment variable **does not** follow RFC 6570 Section -/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -/// does not expand special characters like `?` and `#`, which would lead -/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -/// for multi segment variables. -/// -/// The path variables **must not** refer to any repeated or mapped field, -/// because client libraries are not capable of handling such variable expansion. -/// -/// The path variables **must not** capture the leading "/" character. The reason -/// is that the most common use case "{var}" does not capture the leading "/" -/// character. For consistency, all path variables must share the same behavior. -/// -/// Repeated message fields must not be mapped to URL query parameters, because -/// no client library can support such complicated mapping. -/// -/// If an API needs to use a JSON array for request or response body, it can map -/// the request or response body to a repeated field. However, some gRPC -/// Transcoding implementations may not support this feature. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HttpRule { - /// Selects a method to which this rule applies. 
- /// - /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax - /// details. - #[prost(string, tag="1")] - pub selector: ::prost::alloc::string::String, - /// The name of the request field whose value is mapped to the HTTP request - /// body, or `*` for mapping all request fields not captured by the path - /// pattern to the HTTP body, or omitted for not having any HTTP request body. - /// - /// NOTE: the referred field must be present at the top-level of the request - /// message type. - #[prost(string, tag="7")] - pub body: ::prost::alloc::string::String, - /// Optional. The name of the response field whose value is mapped to the HTTP - /// response body. When omitted, the entire response message will be used - /// as the HTTP response body. - /// - /// NOTE: The referred field must be present at the top-level of the response - /// message type. - #[prost(string, tag="12")] - pub response_body: ::prost::alloc::string::String, - /// Additional HTTP bindings for the selector. Nested bindings must - /// not contain an `additional_bindings` field themselves (that is, - /// the nesting may only be one level deep). - #[prost(message, repeated, tag="11")] - pub additional_bindings: ::prost::alloc::vec::Vec, - /// Determines the URL pattern is matched by this rules. This pattern can be - /// used with any of the {get|put|post|delete|patch} methods. A custom method - /// can be defined using the 'custom' field. - #[prost(oneof="http_rule::Pattern", tags="2, 3, 4, 5, 6, 8")] - pub pattern: ::core::option::Option, -} -/// Nested message and enum types in `HttpRule`. -pub mod http_rule { - /// Determines the URL pattern is matched by this rules. This pattern can be - /// used with any of the {get|put|post|delete|patch} methods. A custom method - /// can be defined using the 'custom' field. - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Pattern { - /// Maps to HTTP GET. Used for listing and getting information about - /// resources. - #[prost(string, tag="2")] - Get(::prost::alloc::string::String), - /// Maps to HTTP PUT. Used for replacing a resource. - #[prost(string, tag="3")] - Put(::prost::alloc::string::String), - /// Maps to HTTP POST. Used for creating a resource or performing an action. - #[prost(string, tag="4")] - Post(::prost::alloc::string::String), - /// Maps to HTTP DELETE. Used for deleting a resource. - #[prost(string, tag="5")] - Delete(::prost::alloc::string::String), - /// Maps to HTTP PATCH. Used for updating a resource. - #[prost(string, tag="6")] - Patch(::prost::alloc::string::String), - /// The custom pattern is used for specifying an HTTP method that is not - /// included in the `pattern` field, such as HEAD, or "*" to leave the - /// HTTP method unspecified for this rule. The wild-card rule is useful - /// for services that provide content to Web (HTML) clients. - #[prost(message, tag="8")] - Custom(super::CustomHttpPattern), - } -} -/// A custom pattern is used for defining custom HTTP verb. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CustomHttpPattern { - /// The name of this custom HTTP verb. - #[prost(string, tag="1")] - pub kind: ::prost::alloc::string::String, - /// The path matched by this custom verb. 
- #[prost(string, tag="2")] - pub path: ::prost::alloc::string::String, -} -include!("google.api.serde.rs"); -// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/nexus/pt/src/google.api.serde.rs b/nexus/pt/src/google.api.serde.rs deleted file mode 100644 index 60a18fb852..0000000000 --- a/nexus/pt/src/google.api.serde.rs +++ /dev/null @@ -1,456 +0,0 @@ -// @generated -impl serde::Serialize for CustomHttpPattern { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.kind.is_empty() { - len += 1; - } - if !self.path.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("google.api.CustomHttpPattern", len)?; - if !self.kind.is_empty() { - struct_ser.serialize_field("kind", &self.kind)?; - } - if !self.path.is_empty() { - struct_ser.serialize_field("path", &self.path)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CustomHttpPattern { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "kind", - "path", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Kind, - Path, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "kind" => Ok(GeneratedField::Kind), - "path" => Ok(GeneratedField::Path), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CustomHttpPattern; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct google.api.CustomHttpPattern") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut kind__ = None; - let mut path__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Kind => { - if kind__.is_some() { - return Err(serde::de::Error::duplicate_field("kind")); - } - kind__ = Some(map.next_value()?); - } - GeneratedField::Path => { - if path__.is_some() { - return Err(serde::de::Error::duplicate_field("path")); - } - path__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CustomHttpPattern { - kind: kind__.unwrap_or_default(), - path: path__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("google.api.CustomHttpPattern", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for Http { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.rules.is_empty() { - len += 1; - } - if self.fully_decode_reserved_expansion { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("google.api.Http", len)?; - if !self.rules.is_empty() { - struct_ser.serialize_field("rules", &self.rules)?; - } - if self.fully_decode_reserved_expansion { - struct_ser.serialize_field("fullyDecodeReservedExpansion", &self.fully_decode_reserved_expansion)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for Http { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "rules", - "fully_decode_reserved_expansion", - "fullyDecodeReservedExpansion", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Rules, - FullyDecodeReservedExpansion, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "rules" => Ok(GeneratedField::Rules), - "fullyDecodeReservedExpansion" | "fully_decode_reserved_expansion" => Ok(GeneratedField::FullyDecodeReservedExpansion), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = Http; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct google.api.Http") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut rules__ = None; - let mut fully_decode_reserved_expansion__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Rules => { - if rules__.is_some() { - return Err(serde::de::Error::duplicate_field("rules")); - } - rules__ = Some(map.next_value()?); - } - GeneratedField::FullyDecodeReservedExpansion => { - if fully_decode_reserved_expansion__.is_some() { - return Err(serde::de::Error::duplicate_field("fullyDecodeReservedExpansion")); - } - fully_decode_reserved_expansion__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(Http { - rules: rules__.unwrap_or_default(), - fully_decode_reserved_expansion: fully_decode_reserved_expansion__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("google.api.Http", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for HttpRule { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.selector.is_empty() { - len += 1; - } - if !self.body.is_empty() { - len += 1; - } - if !self.response_body.is_empty() { - len += 1; - } - if !self.additional_bindings.is_empty() { - len += 1; - } - if self.pattern.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("google.api.HttpRule", len)?; - if !self.selector.is_empty() { - struct_ser.serialize_field("selector", &self.selector)?; - } - if !self.body.is_empty() { - struct_ser.serialize_field("body", &self.body)?; - } - if !self.response_body.is_empty() { - struct_ser.serialize_field("responseBody", &self.response_body)?; - } - if !self.additional_bindings.is_empty() { - struct_ser.serialize_field("additionalBindings", &self.additional_bindings)?; - } - if let Some(v) = self.pattern.as_ref() { - match v { - http_rule::Pattern::Get(v) => { - struct_ser.serialize_field("get", v)?; - } - http_rule::Pattern::Put(v) => { - struct_ser.serialize_field("put", v)?; - } - http_rule::Pattern::Post(v) => { - struct_ser.serialize_field("post", v)?; - } - http_rule::Pattern::Delete(v) => { - struct_ser.serialize_field("delete", v)?; - } - http_rule::Pattern::Patch(v) => { - struct_ser.serialize_field("patch", v)?; - } - http_rule::Pattern::Custom(v) => { - struct_ser.serialize_field("custom", v)?; - } - } - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for HttpRule { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "selector", - "body", - "response_body", - "responseBody", - "additional_bindings", - "additionalBindings", - "get", - "put", - "post", - "delete", - "patch", - "custom", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Selector, - Body, - ResponseBody, - AdditionalBindings, - Get, - Put, - Post, - Delete, - Patch, - Custom, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "selector" => Ok(GeneratedField::Selector), - "body" => Ok(GeneratedField::Body), - "responseBody" | "response_body" => 
Ok(GeneratedField::ResponseBody), - "additionalBindings" | "additional_bindings" => Ok(GeneratedField::AdditionalBindings), - "get" => Ok(GeneratedField::Get), - "put" => Ok(GeneratedField::Put), - "post" => Ok(GeneratedField::Post), - "delete" => Ok(GeneratedField::Delete), - "patch" => Ok(GeneratedField::Patch), - "custom" => Ok(GeneratedField::Custom), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = HttpRule; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct google.api.HttpRule") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut selector__ = None; - let mut body__ = None; - let mut response_body__ = None; - let mut additional_bindings__ = None; - let mut pattern__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Selector => { - if selector__.is_some() { - return Err(serde::de::Error::duplicate_field("selector")); - } - selector__ = Some(map.next_value()?); - } - GeneratedField::Body => { - if body__.is_some() { - return Err(serde::de::Error::duplicate_field("body")); - } - body__ = Some(map.next_value()?); - } - GeneratedField::ResponseBody => { - if response_body__.is_some() { - return Err(serde::de::Error::duplicate_field("responseBody")); - } - response_body__ = Some(map.next_value()?); - } - GeneratedField::AdditionalBindings => { - if additional_bindings__.is_some() { - return Err(serde::de::Error::duplicate_field("additionalBindings")); - } - additional_bindings__ = Some(map.next_value()?); - } - GeneratedField::Get => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("get")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Get); - } - GeneratedField::Put => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("put")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Put); - } - GeneratedField::Post => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("post")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Post); - } - GeneratedField::Delete => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("delete")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Delete); - } - GeneratedField::Patch => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("patch")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Patch); - } - GeneratedField::Custom => { - if pattern__.is_some() { - return Err(serde::de::Error::duplicate_field("custom")); - } - pattern__ = map.next_value::<::std::option::Option<_>>()?.map(http_rule::Pattern::Custom) -; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(HttpRule { - selector: selector__.unwrap_or_default(), - body: body__.unwrap_or_default(), - response_body: response_body__.unwrap_or_default(), - additional_bindings: additional_bindings__.unwrap_or_default(), - pattern: pattern__, - }) - } - } - deserializer.deserialize_struct("google.api.HttpRule", FIELDS, GeneratedVisitor) - } -} diff --git a/nexus/pt/src/peerdb_flow.rs b/nexus/pt/src/peerdb_flow.rs deleted file mode 100644 
index 6a6c65c138..0000000000 --- a/nexus/pt/src/peerdb_flow.rs +++ /dev/null @@ -1,645 +0,0 @@ -// @generated -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableNameMapping { - #[prost(string, tag="1")] - pub source_table_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub destination_table_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RelationMessageColumn { - #[prost(uint32, tag="1")] - pub flags: u32, - #[prost(string, tag="2")] - pub name: ::prost::alloc::string::String, - #[prost(uint32, tag="3")] - pub data_type: u32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RelationMessage { - #[prost(uint32, tag="1")] - pub relation_id: u32, - #[prost(string, tag="2")] - pub relation_name: ::prost::alloc::string::String, - #[prost(message, repeated, tag="3")] - pub columns: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableMapping { - #[prost(string, tag="1")] - pub source_table_identifier: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub destination_table_identifier: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub partition_key: ::prost::alloc::string::String, - #[prost(string, repeated, tag="4")] - pub exclude: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupInput { - #[prost(message, optional, tag="1")] - pub peer: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FlowConnectionConfigs { - #[prost(message, optional, tag="1")] - pub source: ::core::option::Option, - #[prost(message, optional, tag="2")] - pub destination: ::core::option::Option, - #[prost(string, tag="3")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="4")] - pub table_schema: ::core::option::Option, - #[prost(message, repeated, tag="5")] - pub table_mappings: ::prost::alloc::vec::Vec, - #[prost(map="uint32, string", tag="6")] - pub src_table_id_name_mapping: ::std::collections::HashMap, - #[prost(map="string, message", tag="7")] - pub table_name_schema_mapping: ::std::collections::HashMap<::prost::alloc::string::String, TableSchema>, - /// This is an optional peer that will be used to hold metadata in cases where - /// the destination isn't ideal for holding metadata. 
- #[prost(message, optional, tag="8")] - pub metadata_peer: ::core::option::Option, - #[prost(uint32, tag="9")] - pub max_batch_size: u32, - #[prost(bool, tag="10")] - pub do_initial_copy: bool, - #[prost(string, tag="11")] - pub publication_name: ::prost::alloc::string::String, - #[prost(uint32, tag="12")] - pub snapshot_num_rows_per_partition: u32, - /// max parallel workers is per table - #[prost(uint32, tag="13")] - pub snapshot_max_parallel_workers: u32, - #[prost(uint32, tag="14")] - pub snapshot_num_tables_in_parallel: u32, - #[prost(enumeration="QRepSyncMode", tag="15")] - pub snapshot_sync_mode: i32, - #[prost(enumeration="QRepSyncMode", tag="16")] - pub cdc_sync_mode: i32, - #[prost(string, tag="17")] - pub snapshot_staging_path: ::prost::alloc::string::String, - #[prost(string, tag="18")] - pub cdc_staging_path: ::prost::alloc::string::String, - /// currently only works for snowflake - #[prost(bool, tag="19")] - pub soft_delete: bool, - #[prost(string, tag="20")] - pub replication_slot_name: ::prost::alloc::string::String, - /// the below two are for eventhub only - #[prost(int64, tag="21")] - pub push_batch_size: i64, - #[prost(int64, tag="22")] - pub push_parallelism: i64, - /// if true, then the flow will be resynced - /// create new tables with "_resync" suffix, perform initial load and then swap the new tables with the old ones - /// to be used after the old mirror is dropped - #[prost(bool, tag="23")] - pub resync: bool, - #[prost(string, tag="24")] - pub soft_delete_col_name: ::prost::alloc::string::String, - #[prost(string, tag="25")] - pub synced_at_col_name: ::prost::alloc::string::String, - #[prost(bool, tag="26")] - pub initial_copy_only: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RenameTableOption { - #[prost(string, tag="1")] - pub current_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub new_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="3")] - pub table_schema: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RenameTablesInput { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] - pub peer: ::core::option::Option, - #[prost(message, repeated, tag="3")] - pub rename_table_options: ::prost::alloc::vec::Vec, - #[prost(string, optional, tag="4")] - pub soft_delete_col_name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag="5")] - pub synced_at_col_name: ::core::option::Option<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RenameTablesOutput { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateTablesFromExistingInput { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] - pub peer: ::core::option::Option, - #[prost(map="string, string", tag="3")] - pub new_to_existing_table_mapping: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateTablesFromExistingOutput { - #[prost(string, tag="2")] - pub 
flow_job_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncFlowOptions { - #[prost(int32, tag="1")] - pub batch_size: i32, - #[prost(map="uint32, message", tag="2")] - pub relation_message_mapping: ::std::collections::HashMap, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NormalizeFlowOptions { - #[prost(int32, tag="1")] - pub batch_size: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LastSyncState { - #[prost(int64, tag="1")] - pub checkpoint: i64, - #[prost(message, optional, tag="2")] - pub last_synced_at: ::core::option::Option<::pbjson_types::Timestamp>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartFlowInput { - #[prost(message, optional, tag="1")] - pub last_sync_state: ::core::option::Option, - #[prost(message, optional, tag="2")] - pub flow_connection_configs: ::core::option::Option, - #[prost(message, optional, tag="3")] - pub sync_flow_options: ::core::option::Option, - #[prost(map="uint32, message", tag="4")] - pub relation_message_mapping: ::std::collections::HashMap, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StartNormalizeInput { - #[prost(message, optional, tag="1")] - pub flow_connection_configs: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetLastSyncedIdInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnsurePullabilityInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub source_table_identifier: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnsurePullabilityBatchInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(string, repeated, tag="3")] - pub source_table_identifiers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PostgresTableIdentifier { - #[prost(uint32, tag="1")] - pub rel_id: u32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableIdentifier { - #[prost(oneof="table_identifier::TableIdentifier", tags="1")] - pub table_identifier: ::core::option::Option, -} -/// Nested message and enum types in `TableIdentifier`. 
-pub mod table_identifier { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum TableIdentifier { - #[prost(message, tag="1")] - PostgresTableIdentifier(super::PostgresTableIdentifier), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnsurePullabilityOutput { - #[prost(message, optional, tag="1")] - pub table_identifier: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnsurePullabilityBatchOutput { - #[prost(map="string, message", tag="1")] - pub table_identifier_mapping: ::std::collections::HashMap<::prost::alloc::string::String, TableIdentifier>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupReplicationInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(map="string, string", tag="3")] - pub table_name_mapping: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// replicate to destination using ctid - #[prost(message, optional, tag="4")] - pub destination_peer: ::core::option::Option, - #[prost(bool, tag="5")] - pub do_initial_copy: bool, - #[prost(string, tag="6")] - pub existing_publication_name: ::prost::alloc::string::String, - #[prost(string, tag="7")] - pub existing_replication_slot_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupReplicationOutput { - #[prost(string, tag="1")] - pub slot_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub snapshot_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateRawTableInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(map="string, string", tag="3")] - pub table_name_mapping: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - #[prost(enumeration="QRepSyncMode", tag="4")] - pub cdc_sync_mode: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateRawTableOutput { - #[prost(string, tag="1")] - pub table_identifier: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableSchema { - #[prost(string, tag="1")] - pub table_identifier: ::prost::alloc::string::String, - /// DEPRECATED: eliminate when breaking changes are allowed. 
- #[prost(map="string, string", tag="2")] - pub columns: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - #[prost(string, repeated, tag="3")] - pub primary_key_columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(bool, tag="4")] - pub is_replica_identity_full: bool, - #[prost(string, repeated, tag="5")] - pub column_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(string, repeated, tag="6")] - pub column_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetTableSchemaBatchInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, repeated, tag="2")] - pub table_identifiers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(string, tag="3")] - pub flow_name: ::prost::alloc::string::String, - #[prost(bool, tag="4")] - pub skip_pkey_and_replica_check: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetTableSchemaBatchOutput { - #[prost(map="string, message", tag="1")] - pub table_name_schema_mapping: ::std::collections::HashMap<::prost::alloc::string::String, TableSchema>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupNormalizedTableInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(string, tag="2")] - pub table_identifier: ::prost::alloc::string::String, - #[prost(message, optional, tag="3")] - pub source_table_schema: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupNormalizedTableBatchInput { - #[prost(message, optional, tag="1")] - pub peer_connection_config: ::core::option::Option, - #[prost(map="string, message", tag="2")] - pub table_name_schema_mapping: ::std::collections::HashMap<::prost::alloc::string::String, TableSchema>, - /// migration related columns - #[prost(string, tag="4")] - pub soft_delete_col_name: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub synced_at_col_name: ::prost::alloc::string::String, - #[prost(string, tag="6")] - pub flow_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupNormalizedTableOutput { - #[prost(string, tag="1")] - pub table_identifier: ::prost::alloc::string::String, - #[prost(bool, tag="2")] - pub already_exists: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetupNormalizedTableBatchOutput { - #[prost(map="string, bool", tag="1")] - pub table_exists_mapping: ::std::collections::HashMap<::prost::alloc::string::String, bool>, -} -/// partition ranges [start, end] inclusive -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct IntPartitionRange { - #[prost(int64, tag="1")] - pub start: i64, - #[prost(int64, tag="2")] - pub end: i64, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TimestampPartitionRange { - #[prost(message, optional, tag="1")] - pub start: ::core::option::Option<::pbjson_types::Timestamp>, - #[prost(message, optional, tag="2")] - pub end: 
::core::option::Option<::pbjson_types::Timestamp>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Tid { - #[prost(uint32, tag="1")] - pub block_number: u32, - #[prost(uint32, tag="2")] - pub offset_number: u32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TidPartitionRange { - #[prost(message, optional, tag="1")] - pub start: ::core::option::Option, - #[prost(message, optional, tag="2")] - pub end: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PartitionRange { - /// can be a timestamp range or an integer range - #[prost(oneof="partition_range::Range", tags="1, 2, 3")] - pub range: ::core::option::Option, -} -/// Nested message and enum types in `PartitionRange`. -pub mod partition_range { - /// can be a timestamp range or an integer range - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Range { - #[prost(message, tag="1")] - IntRange(super::IntPartitionRange), - #[prost(message, tag="2")] - TimestampRange(super::TimestampPartitionRange), - #[prost(message, tag="3")] - TidRange(super::TidPartitionRange), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepWriteMode { - #[prost(enumeration="QRepWriteType", tag="1")] - pub write_type: i32, - #[prost(string, repeated, tag="2")] - pub upsert_key_columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepConfig { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] - pub source_peer: ::core::option::Option, - #[prost(message, optional, tag="3")] - pub destination_peer: ::core::option::Option, - #[prost(string, tag="4")] - pub destination_table_identifier: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub query: ::prost::alloc::string::String, - #[prost(string, tag="6")] - pub watermark_table: ::prost::alloc::string::String, - #[prost(string, tag="7")] - pub watermark_column: ::prost::alloc::string::String, - #[prost(bool, tag="8")] - pub initial_copy_only: bool, - #[prost(enumeration="QRepSyncMode", tag="9")] - pub sync_mode: i32, - /// DEPRECATED: eliminate when breaking changes are allowed. - #[prost(uint32, tag="10")] - pub batch_size_int: u32, - /// DEPRECATED: eliminate when breaking changes are allowed. - #[prost(uint32, tag="11")] - pub batch_duration_seconds: u32, - #[prost(uint32, tag="12")] - pub max_parallel_workers: u32, - /// time to wait between getting partitions to process - #[prost(uint32, tag="13")] - pub wait_between_batches_seconds: u32, - #[prost(message, optional, tag="14")] - pub write_mode: ::core::option::Option, - /// This is only used when sync_mode is AVRO - /// this is the location where the avro files will be written - /// if this starts with gs:// then it will be written to GCS - /// if this starts with s3:// then it will be written to S3, only supported in Snowflake - /// if nothing is specified then it will be written to local disk - /// if using GCS or S3 make sure your instance has the correct permissions. 
- #[prost(string, tag="15")] - pub staging_path: ::prost::alloc::string::String, - /// This setting overrides batch_size_int and batch_duration_seconds - /// and instead uses the number of rows per partition to determine - /// how many rows to process per batch. - #[prost(uint32, tag="16")] - pub num_rows_per_partition: u32, - /// Creates the watermark table on the destination as-is, can be used for some queries. - #[prost(bool, tag="17")] - pub setup_watermark_table_on_destination: bool, - /// create new tables with "_peerdb_resync" suffix, perform initial load and then swap the new table with the old ones - /// to be used after the old mirror is dropped - #[prost(bool, tag="18")] - pub dst_table_full_resync: bool, - #[prost(string, tag="19")] - pub synced_at_col_name: ::prost::alloc::string::String, - #[prost(string, tag="20")] - pub soft_delete_col_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepPartition { - #[prost(string, tag="2")] - pub partition_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="3")] - pub range: ::core::option::Option, - #[prost(bool, tag="4")] - pub full_table_partition: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepPartitionBatch { - #[prost(int32, tag="1")] - pub batch_id: i32, - #[prost(message, repeated, tag="2")] - pub partitions: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepParitionResult { - #[prost(message, repeated, tag="1")] - pub partitions: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DropFlowInput { - #[prost(string, tag="1")] - pub flow_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeltaAddedColumn { - #[prost(string, tag="1")] - pub column_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub column_type: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableSchemaDelta { - #[prost(string, tag="1")] - pub src_table_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub dst_table_name: ::prost::alloc::string::String, - #[prost(message, repeated, tag="3")] - pub added_columns: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ReplayTableSchemaDeltaInput { - #[prost(message, optional, tag="1")] - pub flow_connection_configs: ::core::option::Option, - #[prost(message, repeated, tag="2")] - pub table_schema_deltas: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepFlowState { - #[prost(message, optional, tag="1")] - pub last_partition: ::core::option::Option, - #[prost(uint64, tag="2")] - pub num_partitions_processed: u64, - #[prost(bool, tag="3")] - pub needs_resync: bool, - #[prost(bool, tag="4")] - pub disable_wait_for_new_rows: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerDbColumns { - #[prost(string, tag="1")] - pub soft_delete_col_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - 
pub synced_at_col_name: ::prost::alloc::string::String, - #[prost(bool, tag="3")] - pub soft_delete: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetOpenConnectionsForUserResult { - #[prost(string, tag="1")] - pub user_name: ::prost::alloc::string::String, - #[prost(int64, tag="2")] - pub current_open_connections: i64, -} -/// protos for qrep -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum QRepSyncMode { - QrepSyncModeMultiInsert = 0, - QrepSyncModeStorageAvro = 1, -} -impl QRepSyncMode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - QRepSyncMode::QrepSyncModeMultiInsert => "QREP_SYNC_MODE_MULTI_INSERT", - QRepSyncMode::QrepSyncModeStorageAvro => "QREP_SYNC_MODE_STORAGE_AVRO", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "QREP_SYNC_MODE_MULTI_INSERT" => Some(Self::QrepSyncModeMultiInsert), - "QREP_SYNC_MODE_STORAGE_AVRO" => Some(Self::QrepSyncModeStorageAvro), - _ => None, - } - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum QRepWriteType { - QrepWriteModeAppend = 0, - QrepWriteModeUpsert = 1, - /// only valid when initial_copy_true is set to true. TRUNCATES tables before reverting to APPEND. - QrepWriteModeOverwrite = 2, -} -impl QRepWriteType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - QRepWriteType::QrepWriteModeAppend => "QREP_WRITE_MODE_APPEND", - QRepWriteType::QrepWriteModeUpsert => "QREP_WRITE_MODE_UPSERT", - QRepWriteType::QrepWriteModeOverwrite => "QREP_WRITE_MODE_OVERWRITE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
-    pub fn from_str_name(value: &str) -> ::core::option::Option {
-        match value {
-            "QREP_WRITE_MODE_APPEND" => Some(Self::QrepWriteModeAppend),
-            "QREP_WRITE_MODE_UPSERT" => Some(Self::QrepWriteModeUpsert),
-            "QREP_WRITE_MODE_OVERWRITE" => Some(Self::QrepWriteModeOverwrite),
-            _ => None,
-        }
-    }
-}
-include!("peerdb_flow.serde.rs");
-// @@protoc_insertion_point(module)
\ No newline at end of file
diff --git a/nexus/pt/src/peerdb_flow.serde.rs b/nexus/pt/src/peerdb_flow.serde.rs
deleted file mode 100644
index c047041ae0..0000000000
--- a/nexus/pt/src/peerdb_flow.serde.rs
+++ /dev/null
@@ -1,7201 +0,0 @@
-// @generated
-impl serde::Serialize for CreateRawTableInput {
-    #[allow(deprecated)]
-    fn serialize(&self, serializer: S) -> std::result::Result
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-        let mut len = 0;
-        if self.peer_connection_config.is_some() {
-            len += 1;
-        }
-        if !self.flow_job_name.is_empty() {
-            len += 1;
-        }
-        if !self.table_name_mapping.is_empty() {
-            len += 1;
-        }
-        if self.cdc_sync_mode != 0 {
-            len += 1;
-        }
-        let mut struct_ser = serializer.serialize_struct("peerdb_flow.CreateRawTableInput", len)?;
-        if let Some(v) = self.peer_connection_config.as_ref() {
-            struct_ser.serialize_field("peerConnectionConfig", v)?;
-        }
-        if !self.flow_job_name.is_empty() {
-            struct_ser.serialize_field("flowJobName", &self.flow_job_name)?;
-        }
-        if !self.table_name_mapping.is_empty() {
-            struct_ser.serialize_field("tableNameMapping", &self.table_name_mapping)?;
-        }
-        if self.cdc_sync_mode != 0 {
-            let v = QRepSyncMode::from_i32(self.cdc_sync_mode)
-                .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.cdc_sync_mode)))?;
-            struct_ser.serialize_field("cdcSyncMode", &v)?;
-        }
-        struct_ser.end()
-    }
-}
-impl<'de> serde::Deserialize<'de> for CreateRawTableInput {
-    #[allow(deprecated)]
-    fn deserialize(deserializer: D) -> std::result::Result
-    where
-        D: serde::Deserializer<'de>,
-    {
-        const FIELDS: &[&str] = &[
-            "peer_connection_config",
-            "peerConnectionConfig",
-            "flow_job_name",
-            "flowJobName",
-            "table_name_mapping",
-            "tableNameMapping",
-            "cdc_sync_mode",
-            "cdcSyncMode",
-        ];
-
-        #[allow(clippy::enum_variant_names)]
-        enum GeneratedField {
-            PeerConnectionConfig,
-            FlowJobName,
-            TableNameMapping,
-            CdcSyncMode,
-            __SkipField__,
-        }
-        impl<'de> serde::Deserialize<'de> for GeneratedField {
-            fn deserialize(deserializer: D) -> std::result::Result
-            where
-                D: serde::Deserializer<'de>,
-            {
-                struct GeneratedVisitor;
-
-                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-                    type Value = GeneratedField;
-
-                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                        write!(formatter, "expected one of: {:?}", &FIELDS)
-                    }
-
-                    #[allow(unused_variables)]
-                    fn visit_str(self, value: &str) -> std::result::Result
-                    where
-                        E: serde::de::Error,
-                    {
-                        match value {
-                            "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig),
-                            "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName),
-                            "tableNameMapping" | "table_name_mapping" => Ok(GeneratedField::TableNameMapping),
-                            "cdcSyncMode" | "cdc_sync_mode" => Ok(GeneratedField::CdcSyncMode),
-                            _ => Ok(GeneratedField::__SkipField__),
-                        }
-                    }
-                }
-                deserializer.deserialize_identifier(GeneratedVisitor)
-            }
-        }
-        struct GeneratedVisitor;
-        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-            type Value = CreateRawTableInput;
-
-            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
formatter.write_str("struct peerdb_flow.CreateRawTableInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut flow_job_name__ = None; - let mut table_name_mapping__ = None; - let mut cdc_sync_mode__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::TableNameMapping => { - if table_name_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableNameMapping")); - } - table_name_mapping__ = Some( - map.next_value::>()? - ); - } - GeneratedField::CdcSyncMode => { - if cdc_sync_mode__.is_some() { - return Err(serde::de::Error::duplicate_field("cdcSyncMode")); - } - cdc_sync_mode__ = Some(map.next_value::()? as i32); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateRawTableInput { - peer_connection_config: peer_connection_config__, - flow_job_name: flow_job_name__.unwrap_or_default(), - table_name_mapping: table_name_mapping__.unwrap_or_default(), - cdc_sync_mode: cdc_sync_mode__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.CreateRawTableInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateRawTableOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_identifier.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.CreateRawTableOutput", len)?; - if !self.table_identifier.is_empty() { - struct_ser.serialize_field("tableIdentifier", &self.table_identifier)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateRawTableOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_identifier", - "tableIdentifier", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableIdentifier, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableIdentifier" | "table_identifier" => Ok(GeneratedField::TableIdentifier), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateRawTableOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.CreateRawTableOutput") - } - - fn 
visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifier")); - } - table_identifier__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateRawTableOutput { - table_identifier: table_identifier__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.CreateRawTableOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateTablesFromExistingInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.peer.is_some() { - len += 1; - } - if !self.new_to_existing_table_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.CreateTablesFromExistingInput", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if let Some(v) = self.peer.as_ref() { - struct_ser.serialize_field("peer", v)?; - } - if !self.new_to_existing_table_mapping.is_empty() { - struct_ser.serialize_field("newToExistingTableMapping", &self.new_to_existing_table_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateTablesFromExistingInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - "peer", - "new_to_existing_table_mapping", - "newToExistingTableMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - Peer, - NewToExistingTableMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "peer" => Ok(GeneratedField::Peer), - "newToExistingTableMapping" | "new_to_existing_table_mapping" => Ok(GeneratedField::NewToExistingTableMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateTablesFromExistingInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.CreateTablesFromExistingInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - let mut peer__ = None; - let mut new_to_existing_table_mapping__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::Peer => { - if peer__.is_some() { - return Err(serde::de::Error::duplicate_field("peer")); - } - peer__ = map.next_value()?; - } - GeneratedField::NewToExistingTableMapping => { - if new_to_existing_table_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("newToExistingTableMapping")); - } - new_to_existing_table_mapping__ = Some( - map.next_value::>()? - ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateTablesFromExistingInput { - flow_job_name: flow_job_name__.unwrap_or_default(), - peer: peer__, - new_to_existing_table_mapping: new_to_existing_table_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.CreateTablesFromExistingInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateTablesFromExistingOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.CreateTablesFromExistingOutput", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateTablesFromExistingOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateTablesFromExistingOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.CreateTablesFromExistingOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateTablesFromExistingOutput { - flow_job_name: flow_job_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.CreateTablesFromExistingOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for DeltaAddedColumn { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.column_name.is_empty() { - len += 1; - } - if !self.column_type.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.DeltaAddedColumn", len)?; - if !self.column_name.is_empty() { - struct_ser.serialize_field("columnName", &self.column_name)?; - } - if !self.column_type.is_empty() { - struct_ser.serialize_field("columnType", &self.column_type)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for DeltaAddedColumn { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "column_name", - "columnName", - "column_type", - "columnType", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - ColumnName, - ColumnType, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "columnName" | "column_name" => Ok(GeneratedField::ColumnName), - "columnType" | "column_type" => Ok(GeneratedField::ColumnType), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = DeltaAddedColumn; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.DeltaAddedColumn") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut column_name__ = None; - let mut column_type__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::ColumnName => { - if column_name__.is_some() { - return Err(serde::de::Error::duplicate_field("columnName")); - } - column_name__ = Some(map.next_value()?); - } - GeneratedField::ColumnType => { - if column_type__.is_some() { - return Err(serde::de::Error::duplicate_field("columnType")); - } - column_type__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(DeltaAddedColumn { - column_name: column_name__.unwrap_or_default(), - column_type: column_type__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.DeltaAddedColumn", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for DropFlowInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.DropFlowInput", len)?; - if !self.flow_name.is_empty() { - struct_ser.serialize_field("flowName", &self.flow_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for DropFlowInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_name", - "flowName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowName" | "flow_name" => Ok(GeneratedField::FlowName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = DropFlowInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.DropFlowInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowName => { - if flow_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowName")); - } - flow_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(DropFlowInput { - flow_name: flow_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.DropFlowInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for EnsurePullabilityBatchInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if !self.source_table_identifiers.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.EnsurePullabilityBatchInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if !self.source_table_identifiers.is_empty() { - struct_ser.serialize_field("sourceTableIdentifiers", &self.source_table_identifiers)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EnsurePullabilityBatchInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "flow_job_name", - "flowJobName", - "source_table_identifiers", - "sourceTableIdentifiers", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - FlowJobName, - SourceTableIdentifiers, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "sourceTableIdentifiers" | "source_table_identifiers" => Ok(GeneratedField::SourceTableIdentifiers), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EnsurePullabilityBatchInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.EnsurePullabilityBatchInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut flow_job_name__ = None; - let mut source_table_identifiers__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::SourceTableIdentifiers => { - if source_table_identifiers__.is_some() { - return Err(serde::de::Error::duplicate_field("sourceTableIdentifiers")); - } - source_table_identifiers__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EnsurePullabilityBatchInput { - peer_connection_config: peer_connection_config__, - flow_job_name: flow_job_name__.unwrap_or_default(), - source_table_identifiers: source_table_identifiers__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.EnsurePullabilityBatchInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for EnsurePullabilityBatchOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_identifier_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.EnsurePullabilityBatchOutput", len)?; - if !self.table_identifier_mapping.is_empty() { - struct_ser.serialize_field("tableIdentifierMapping", &self.table_identifier_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EnsurePullabilityBatchOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_identifier_mapping", - "tableIdentifierMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableIdentifierMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableIdentifierMapping" | "table_identifier_mapping" => Ok(GeneratedField::TableIdentifierMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EnsurePullabilityBatchOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.EnsurePullabilityBatchOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier_mapping__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableIdentifierMapping => { - if table_identifier_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifierMapping")); - } - table_identifier_mapping__ = Some( - map.next_value::>()? 
- ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EnsurePullabilityBatchOutput { - table_identifier_mapping: table_identifier_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.EnsurePullabilityBatchOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for EnsurePullabilityInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if !self.source_table_identifier.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.EnsurePullabilityInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if !self.source_table_identifier.is_empty() { - struct_ser.serialize_field("sourceTableIdentifier", &self.source_table_identifier)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EnsurePullabilityInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "flow_job_name", - "flowJobName", - "source_table_identifier", - "sourceTableIdentifier", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - FlowJobName, - SourceTableIdentifier, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "sourceTableIdentifier" | "source_table_identifier" => Ok(GeneratedField::SourceTableIdentifier), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EnsurePullabilityInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.EnsurePullabilityInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut flow_job_name__ = None; - let mut source_table_identifier__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::SourceTableIdentifier => { - if source_table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("sourceTableIdentifier")); - } - source_table_identifier__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EnsurePullabilityInput { - peer_connection_config: peer_connection_config__, - flow_job_name: flow_job_name__.unwrap_or_default(), - source_table_identifier: source_table_identifier__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.EnsurePullabilityInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for EnsurePullabilityOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.table_identifier.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.EnsurePullabilityOutput", len)?; - if let Some(v) = self.table_identifier.as_ref() { - struct_ser.serialize_field("tableIdentifier", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EnsurePullabilityOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_identifier", - "tableIdentifier", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableIdentifier, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableIdentifier" | "table_identifier" => Ok(GeneratedField::TableIdentifier), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EnsurePullabilityOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.EnsurePullabilityOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::TableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifier")); - } - table_identifier__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EnsurePullabilityOutput { - table_identifier: table_identifier__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.EnsurePullabilityOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for FlowConnectionConfigs { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.source.is_some() { - len += 1; - } - if self.destination.is_some() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.table_schema.is_some() { - len += 1; - } - if !self.table_mappings.is_empty() { - len += 1; - } - if !self.src_table_id_name_mapping.is_empty() { - len += 1; - } - if !self.table_name_schema_mapping.is_empty() { - len += 1; - } - if self.metadata_peer.is_some() { - len += 1; - } - if self.max_batch_size != 0 { - len += 1; - } - if self.do_initial_copy { - len += 1; - } - if !self.publication_name.is_empty() { - len += 1; - } - if self.snapshot_num_rows_per_partition != 0 { - len += 1; - } - if self.snapshot_max_parallel_workers != 0 { - len += 1; - } - if self.snapshot_num_tables_in_parallel != 0 { - len += 1; - } - if self.snapshot_sync_mode != 0 { - len += 1; - } - if self.cdc_sync_mode != 0 { - len += 1; - } - if !self.snapshot_staging_path.is_empty() { - len += 1; - } - if !self.cdc_staging_path.is_empty() { - len += 1; - } - if self.soft_delete { - len += 1; - } - if !self.replication_slot_name.is_empty() { - len += 1; - } - if self.push_batch_size != 0 { - len += 1; - } - if self.push_parallelism != 0 { - len += 1; - } - if self.resync { - len += 1; - } - if !self.soft_delete_col_name.is_empty() { - len += 1; - } - if !self.synced_at_col_name.is_empty() { - len += 1; - } - if self.initial_copy_only { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.FlowConnectionConfigs", len)?; - if let Some(v) = self.source.as_ref() { - struct_ser.serialize_field("source", v)?; - } - if let Some(v) = self.destination.as_ref() { - struct_ser.serialize_field("destination", v)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if let Some(v) = self.table_schema.as_ref() { - struct_ser.serialize_field("tableSchema", v)?; - } - if !self.table_mappings.is_empty() { - struct_ser.serialize_field("tableMappings", &self.table_mappings)?; - } - if !self.src_table_id_name_mapping.is_empty() { - struct_ser.serialize_field("srcTableIdNameMapping", &self.src_table_id_name_mapping)?; - } - if !self.table_name_schema_mapping.is_empty() { - struct_ser.serialize_field("tableNameSchemaMapping", &self.table_name_schema_mapping)?; - } - if let Some(v) = self.metadata_peer.as_ref() { - struct_ser.serialize_field("metadataPeer", v)?; - } - if self.max_batch_size != 0 { - struct_ser.serialize_field("maxBatchSize", &self.max_batch_size)?; - } - if self.do_initial_copy { - struct_ser.serialize_field("doInitialCopy", &self.do_initial_copy)?; - } - if !self.publication_name.is_empty() { - struct_ser.serialize_field("publicationName", &self.publication_name)?; - } - if self.snapshot_num_rows_per_partition != 0 { - struct_ser.serialize_field("snapshotNumRowsPerPartition", 
&self.snapshot_num_rows_per_partition)?; - } - if self.snapshot_max_parallel_workers != 0 { - struct_ser.serialize_field("snapshotMaxParallelWorkers", &self.snapshot_max_parallel_workers)?; - } - if self.snapshot_num_tables_in_parallel != 0 { - struct_ser.serialize_field("snapshotNumTablesInParallel", &self.snapshot_num_tables_in_parallel)?; - } - if self.snapshot_sync_mode != 0 { - let v = QRepSyncMode::from_i32(self.snapshot_sync_mode) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.snapshot_sync_mode)))?; - struct_ser.serialize_field("snapshotSyncMode", &v)?; - } - if self.cdc_sync_mode != 0 { - let v = QRepSyncMode::from_i32(self.cdc_sync_mode) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.cdc_sync_mode)))?; - struct_ser.serialize_field("cdcSyncMode", &v)?; - } - if !self.snapshot_staging_path.is_empty() { - struct_ser.serialize_field("snapshotStagingPath", &self.snapshot_staging_path)?; - } - if !self.cdc_staging_path.is_empty() { - struct_ser.serialize_field("cdcStagingPath", &self.cdc_staging_path)?; - } - if self.soft_delete { - struct_ser.serialize_field("softDelete", &self.soft_delete)?; - } - if !self.replication_slot_name.is_empty() { - struct_ser.serialize_field("replicationSlotName", &self.replication_slot_name)?; - } - if self.push_batch_size != 0 { - struct_ser.serialize_field("pushBatchSize", ToString::to_string(&self.push_batch_size).as_str())?; - } - if self.push_parallelism != 0 { - struct_ser.serialize_field("pushParallelism", ToString::to_string(&self.push_parallelism).as_str())?; - } - if self.resync { - struct_ser.serialize_field("resync", &self.resync)?; - } - if !self.soft_delete_col_name.is_empty() { - struct_ser.serialize_field("softDeleteColName", &self.soft_delete_col_name)?; - } - if !self.synced_at_col_name.is_empty() { - struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; - } - if self.initial_copy_only { - struct_ser.serialize_field("initialCopyOnly", &self.initial_copy_only)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for FlowConnectionConfigs { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "source", - "destination", - "flow_job_name", - "flowJobName", - "table_schema", - "tableSchema", - "table_mappings", - "tableMappings", - "src_table_id_name_mapping", - "srcTableIdNameMapping", - "table_name_schema_mapping", - "tableNameSchemaMapping", - "metadata_peer", - "metadataPeer", - "max_batch_size", - "maxBatchSize", - "do_initial_copy", - "doInitialCopy", - "publication_name", - "publicationName", - "snapshot_num_rows_per_partition", - "snapshotNumRowsPerPartition", - "snapshot_max_parallel_workers", - "snapshotMaxParallelWorkers", - "snapshot_num_tables_in_parallel", - "snapshotNumTablesInParallel", - "snapshot_sync_mode", - "snapshotSyncMode", - "cdc_sync_mode", - "cdcSyncMode", - "snapshot_staging_path", - "snapshotStagingPath", - "cdc_staging_path", - "cdcStagingPath", - "soft_delete", - "softDelete", - "replication_slot_name", - "replicationSlotName", - "push_batch_size", - "pushBatchSize", - "push_parallelism", - "pushParallelism", - "resync", - "soft_delete_col_name", - "softDeleteColName", - "synced_at_col_name", - "syncedAtColName", - "initial_copy_only", - "initialCopyOnly", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Source, - Destination, - FlowJobName, - TableSchema, - TableMappings, - 
SrcTableIdNameMapping, - TableNameSchemaMapping, - MetadataPeer, - MaxBatchSize, - DoInitialCopy, - PublicationName, - SnapshotNumRowsPerPartition, - SnapshotMaxParallelWorkers, - SnapshotNumTablesInParallel, - SnapshotSyncMode, - CdcSyncMode, - SnapshotStagingPath, - CdcStagingPath, - SoftDelete, - ReplicationSlotName, - PushBatchSize, - PushParallelism, - Resync, - SoftDeleteColName, - SyncedAtColName, - InitialCopyOnly, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "source" => Ok(GeneratedField::Source), - "destination" => Ok(GeneratedField::Destination), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "tableSchema" | "table_schema" => Ok(GeneratedField::TableSchema), - "tableMappings" | "table_mappings" => Ok(GeneratedField::TableMappings), - "srcTableIdNameMapping" | "src_table_id_name_mapping" => Ok(GeneratedField::SrcTableIdNameMapping), - "tableNameSchemaMapping" | "table_name_schema_mapping" => Ok(GeneratedField::TableNameSchemaMapping), - "metadataPeer" | "metadata_peer" => Ok(GeneratedField::MetadataPeer), - "maxBatchSize" | "max_batch_size" => Ok(GeneratedField::MaxBatchSize), - "doInitialCopy" | "do_initial_copy" => Ok(GeneratedField::DoInitialCopy), - "publicationName" | "publication_name" => Ok(GeneratedField::PublicationName), - "snapshotNumRowsPerPartition" | "snapshot_num_rows_per_partition" => Ok(GeneratedField::SnapshotNumRowsPerPartition), - "snapshotMaxParallelWorkers" | "snapshot_max_parallel_workers" => Ok(GeneratedField::SnapshotMaxParallelWorkers), - "snapshotNumTablesInParallel" | "snapshot_num_tables_in_parallel" => Ok(GeneratedField::SnapshotNumTablesInParallel), - "snapshotSyncMode" | "snapshot_sync_mode" => Ok(GeneratedField::SnapshotSyncMode), - "cdcSyncMode" | "cdc_sync_mode" => Ok(GeneratedField::CdcSyncMode), - "snapshotStagingPath" | "snapshot_staging_path" => Ok(GeneratedField::SnapshotStagingPath), - "cdcStagingPath" | "cdc_staging_path" => Ok(GeneratedField::CdcStagingPath), - "softDelete" | "soft_delete" => Ok(GeneratedField::SoftDelete), - "replicationSlotName" | "replication_slot_name" => Ok(GeneratedField::ReplicationSlotName), - "pushBatchSize" | "push_batch_size" => Ok(GeneratedField::PushBatchSize), - "pushParallelism" | "push_parallelism" => Ok(GeneratedField::PushParallelism), - "resync" => Ok(GeneratedField::Resync), - "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), - "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), - "initialCopyOnly" | "initial_copy_only" => Ok(GeneratedField::InitialCopyOnly), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = FlowConnectionConfigs; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.FlowConnectionConfigs") - } - - fn visit_map(self, mut map: V) 
-> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut source__ = None; - let mut destination__ = None; - let mut flow_job_name__ = None; - let mut table_schema__ = None; - let mut table_mappings__ = None; - let mut src_table_id_name_mapping__ = None; - let mut table_name_schema_mapping__ = None; - let mut metadata_peer__ = None; - let mut max_batch_size__ = None; - let mut do_initial_copy__ = None; - let mut publication_name__ = None; - let mut snapshot_num_rows_per_partition__ = None; - let mut snapshot_max_parallel_workers__ = None; - let mut snapshot_num_tables_in_parallel__ = None; - let mut snapshot_sync_mode__ = None; - let mut cdc_sync_mode__ = None; - let mut snapshot_staging_path__ = None; - let mut cdc_staging_path__ = None; - let mut soft_delete__ = None; - let mut replication_slot_name__ = None; - let mut push_batch_size__ = None; - let mut push_parallelism__ = None; - let mut resync__ = None; - let mut soft_delete_col_name__ = None; - let mut synced_at_col_name__ = None; - let mut initial_copy_only__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Source => { - if source__.is_some() { - return Err(serde::de::Error::duplicate_field("source")); - } - source__ = map.next_value()?; - } - GeneratedField::Destination => { - if destination__.is_some() { - return Err(serde::de::Error::duplicate_field("destination")); - } - destination__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::TableSchema => { - if table_schema__.is_some() { - return Err(serde::de::Error::duplicate_field("tableSchema")); - } - table_schema__ = map.next_value()?; - } - GeneratedField::TableMappings => { - if table_mappings__.is_some() { - return Err(serde::de::Error::duplicate_field("tableMappings")); - } - table_mappings__ = Some(map.next_value()?); - } - GeneratedField::SrcTableIdNameMapping => { - if src_table_id_name_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("srcTableIdNameMapping")); - } - src_table_id_name_mapping__ = Some( - map.next_value::, _>>()? - .into_iter().map(|(k,v)| (k.0, v)).collect() - ); - } - GeneratedField::TableNameSchemaMapping => { - if table_name_schema_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableNameSchemaMapping")); - } - table_name_schema_mapping__ = Some( - map.next_value::>()? 
- ); - } - GeneratedField::MetadataPeer => { - if metadata_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataPeer")); - } - metadata_peer__ = map.next_value()?; - } - GeneratedField::MaxBatchSize => { - if max_batch_size__.is_some() { - return Err(serde::de::Error::duplicate_field("maxBatchSize")); - } - max_batch_size__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::DoInitialCopy => { - if do_initial_copy__.is_some() { - return Err(serde::de::Error::duplicate_field("doInitialCopy")); - } - do_initial_copy__ = Some(map.next_value()?); - } - GeneratedField::PublicationName => { - if publication_name__.is_some() { - return Err(serde::de::Error::duplicate_field("publicationName")); - } - publication_name__ = Some(map.next_value()?); - } - GeneratedField::SnapshotNumRowsPerPartition => { - if snapshot_num_rows_per_partition__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotNumRowsPerPartition")); - } - snapshot_num_rows_per_partition__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::SnapshotMaxParallelWorkers => { - if snapshot_max_parallel_workers__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotMaxParallelWorkers")); - } - snapshot_max_parallel_workers__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::SnapshotNumTablesInParallel => { - if snapshot_num_tables_in_parallel__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotNumTablesInParallel")); - } - snapshot_num_tables_in_parallel__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::SnapshotSyncMode => { - if snapshot_sync_mode__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotSyncMode")); - } - snapshot_sync_mode__ = Some(map.next_value::()? as i32); - } - GeneratedField::CdcSyncMode => { - if cdc_sync_mode__.is_some() { - return Err(serde::de::Error::duplicate_field("cdcSyncMode")); - } - cdc_sync_mode__ = Some(map.next_value::()? 
as i32); - } - GeneratedField::SnapshotStagingPath => { - if snapshot_staging_path__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotStagingPath")); - } - snapshot_staging_path__ = Some(map.next_value()?); - } - GeneratedField::CdcStagingPath => { - if cdc_staging_path__.is_some() { - return Err(serde::de::Error::duplicate_field("cdcStagingPath")); - } - cdc_staging_path__ = Some(map.next_value()?); - } - GeneratedField::SoftDelete => { - if soft_delete__.is_some() { - return Err(serde::de::Error::duplicate_field("softDelete")); - } - soft_delete__ = Some(map.next_value()?); - } - GeneratedField::ReplicationSlotName => { - if replication_slot_name__.is_some() { - return Err(serde::de::Error::duplicate_field("replicationSlotName")); - } - replication_slot_name__ = Some(map.next_value()?); - } - GeneratedField::PushBatchSize => { - if push_batch_size__.is_some() { - return Err(serde::de::Error::duplicate_field("pushBatchSize")); - } - push_batch_size__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::PushParallelism => { - if push_parallelism__.is_some() { - return Err(serde::de::Error::duplicate_field("pushParallelism")); - } - push_parallelism__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::Resync => { - if resync__.is_some() { - return Err(serde::de::Error::duplicate_field("resync")); - } - resync__ = Some(map.next_value()?); - } - GeneratedField::SoftDeleteColName => { - if soft_delete_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("softDeleteColName")); - } - soft_delete_col_name__ = Some(map.next_value()?); - } - GeneratedField::SyncedAtColName => { - if synced_at_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("syncedAtColName")); - } - synced_at_col_name__ = Some(map.next_value()?); - } - GeneratedField::InitialCopyOnly => { - if initial_copy_only__.is_some() { - return Err(serde::de::Error::duplicate_field("initialCopyOnly")); - } - initial_copy_only__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(FlowConnectionConfigs { - source: source__, - destination: destination__, - flow_job_name: flow_job_name__.unwrap_or_default(), - table_schema: table_schema__, - table_mappings: table_mappings__.unwrap_or_default(), - src_table_id_name_mapping: src_table_id_name_mapping__.unwrap_or_default(), - table_name_schema_mapping: table_name_schema_mapping__.unwrap_or_default(), - metadata_peer: metadata_peer__, - max_batch_size: max_batch_size__.unwrap_or_default(), - do_initial_copy: do_initial_copy__.unwrap_or_default(), - publication_name: publication_name__.unwrap_or_default(), - snapshot_num_rows_per_partition: snapshot_num_rows_per_partition__.unwrap_or_default(), - snapshot_max_parallel_workers: snapshot_max_parallel_workers__.unwrap_or_default(), - snapshot_num_tables_in_parallel: snapshot_num_tables_in_parallel__.unwrap_or_default(), - snapshot_sync_mode: snapshot_sync_mode__.unwrap_or_default(), - cdc_sync_mode: cdc_sync_mode__.unwrap_or_default(), - snapshot_staging_path: snapshot_staging_path__.unwrap_or_default(), - cdc_staging_path: cdc_staging_path__.unwrap_or_default(), - soft_delete: soft_delete__.unwrap_or_default(), - replication_slot_name: replication_slot_name__.unwrap_or_default(), - push_batch_size: push_batch_size__.unwrap_or_default(), - push_parallelism: push_parallelism__.unwrap_or_default(), - resync: resync__.unwrap_or_default(), - 
soft_delete_col_name: soft_delete_col_name__.unwrap_or_default(), - synced_at_col_name: synced_at_col_name__.unwrap_or_default(), - initial_copy_only: initial_copy_only__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.FlowConnectionConfigs", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetLastSyncedIdInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.GetLastSyncedIDInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetLastSyncedIdInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "flow_job_name", - "flowJobName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - FlowJobName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetLastSyncedIdInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.GetLastSyncedIDInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut flow_job_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(GetLastSyncedIdInput { - peer_connection_config: peer_connection_config__, - flow_job_name: flow_job_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.GetLastSyncedIDInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetOpenConnectionsForUserResult { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.user_name.is_empty() { - len += 1; - } - if self.current_open_connections != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.GetOpenConnectionsForUserResult", len)?; - if !self.user_name.is_empty() { - struct_ser.serialize_field("userName", &self.user_name)?; - } - if self.current_open_connections != 0 { - struct_ser.serialize_field("currentOpenConnections", ToString::to_string(&self.current_open_connections).as_str())?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetOpenConnectionsForUserResult { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "user_name", - "userName", - "current_open_connections", - "currentOpenConnections", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - UserName, - CurrentOpenConnections, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "userName" | "user_name" => Ok(GeneratedField::UserName), - "currentOpenConnections" | "current_open_connections" => Ok(GeneratedField::CurrentOpenConnections), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetOpenConnectionsForUserResult; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.GetOpenConnectionsForUserResult") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut user_name__ = None; - let mut current_open_connections__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::UserName => { - if user_name__.is_some() { - return Err(serde::de::Error::duplicate_field("userName")); - } - user_name__ = Some(map.next_value()?); - } - GeneratedField::CurrentOpenConnections => { - if current_open_connections__.is_some() { - return Err(serde::de::Error::duplicate_field("currentOpenConnections")); - } - current_open_connections__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(GetOpenConnectionsForUserResult { - user_name: user_name__.unwrap_or_default(), - current_open_connections: current_open_connections__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.GetOpenConnectionsForUserResult", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetTableSchemaBatchInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.table_identifiers.is_empty() { - len += 1; - } - if !self.flow_name.is_empty() { - len += 1; - } - if self.skip_pkey_and_replica_check { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.GetTableSchemaBatchInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.table_identifiers.is_empty() { - struct_ser.serialize_field("tableIdentifiers", &self.table_identifiers)?; - } - if !self.flow_name.is_empty() { - struct_ser.serialize_field("flowName", &self.flow_name)?; - } - if self.skip_pkey_and_replica_check { - struct_ser.serialize_field("skipPkeyAndReplicaCheck", &self.skip_pkey_and_replica_check)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetTableSchemaBatchInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "table_identifiers", - "tableIdentifiers", - "flow_name", - "flowName", - "skip_pkey_and_replica_check", - "skipPkeyAndReplicaCheck", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - TableIdentifiers, - FlowName, - SkipPkeyAndReplicaCheck, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "tableIdentifiers" | "table_identifiers" => Ok(GeneratedField::TableIdentifiers), - "flowName" | "flow_name" => Ok(GeneratedField::FlowName), - "skipPkeyAndReplicaCheck" | "skip_pkey_and_replica_check" => Ok(GeneratedField::SkipPkeyAndReplicaCheck), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> 
serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetTableSchemaBatchInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.GetTableSchemaBatchInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut table_identifiers__ = None; - let mut flow_name__ = None; - let mut skip_pkey_and_replica_check__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::TableIdentifiers => { - if table_identifiers__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifiers")); - } - table_identifiers__ = Some(map.next_value()?); - } - GeneratedField::FlowName => { - if flow_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowName")); - } - flow_name__ = Some(map.next_value()?); - } - GeneratedField::SkipPkeyAndReplicaCheck => { - if skip_pkey_and_replica_check__.is_some() { - return Err(serde::de::Error::duplicate_field("skipPkeyAndReplicaCheck")); - } - skip_pkey_and_replica_check__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(GetTableSchemaBatchInput { - peer_connection_config: peer_connection_config__, - table_identifiers: table_identifiers__.unwrap_or_default(), - flow_name: flow_name__.unwrap_or_default(), - skip_pkey_and_replica_check: skip_pkey_and_replica_check__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.GetTableSchemaBatchInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetTableSchemaBatchOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_name_schema_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.GetTableSchemaBatchOutput", len)?; - if !self.table_name_schema_mapping.is_empty() { - struct_ser.serialize_field("tableNameSchemaMapping", &self.table_name_schema_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetTableSchemaBatchOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_name_schema_mapping", - "tableNameSchemaMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableNameSchemaMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableNameSchemaMapping" | "table_name_schema_mapping" => Ok(GeneratedField::TableNameSchemaMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - 
deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetTableSchemaBatchOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.GetTableSchemaBatchOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_name_schema_mapping__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableNameSchemaMapping => { - if table_name_schema_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableNameSchemaMapping")); - } - table_name_schema_mapping__ = Some( - map.next_value::>()? - ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(GetTableSchemaBatchOutput { - table_name_schema_mapping: table_name_schema_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.GetTableSchemaBatchOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for IntPartitionRange { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.start != 0 { - len += 1; - } - if self.end != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.IntPartitionRange", len)?; - if self.start != 0 { - struct_ser.serialize_field("start", ToString::to_string(&self.start).as_str())?; - } - if self.end != 0 { - struct_ser.serialize_field("end", ToString::to_string(&self.end).as_str())?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for IntPartitionRange { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "start", - "end", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Start, - End, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "start" => Ok(GeneratedField::Start), - "end" => Ok(GeneratedField::End), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = IntPartitionRange; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.IntPartitionRange") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut start__ = None; - let mut end__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Start => { - if start__.is_some() { - return Err(serde::de::Error::duplicate_field("start")); - } - start__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::End => { - if end__.is_some() { - return Err(serde::de::Error::duplicate_field("end")); - } - end__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(IntPartitionRange { - start: start__.unwrap_or_default(), - end: end__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.IntPartitionRange", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for LastSyncState { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.checkpoint != 0 { - len += 1; - } - if self.last_synced_at.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.LastSyncState", len)?; - if self.checkpoint != 0 { - struct_ser.serialize_field("checkpoint", ToString::to_string(&self.checkpoint).as_str())?; - } - if let Some(v) = self.last_synced_at.as_ref() { - struct_ser.serialize_field("lastSyncedAt", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for LastSyncState { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "checkpoint", - "last_synced_at", - "lastSyncedAt", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Checkpoint, - LastSyncedAt, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "checkpoint" => Ok(GeneratedField::Checkpoint), - "lastSyncedAt" | "last_synced_at" => Ok(GeneratedField::LastSyncedAt), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = LastSyncState; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.LastSyncState") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut checkpoint__ = None; - let mut last_synced_at__ = None; - while let Some(k) = map.next_key()? 
-                {
-                    match k {
-                        GeneratedField::Checkpoint => {
-                            if checkpoint__.is_some() {
-                                return Err(serde::de::Error::duplicate_field("checkpoint"));
-                            }
-                            checkpoint__ = 
-                                Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0)
-                            ;
-                        }
-                        GeneratedField::LastSyncedAt => {
-                            if last_synced_at__.is_some() {
-                                return Err(serde::de::Error::duplicate_field("lastSyncedAt"));
-                            }
-                            last_synced_at__ = map.next_value()?;
-                        }
-                        GeneratedField::__SkipField__ => {
-                            let _ = map.next_value::<serde::de::IgnoredAny>()?;
-                        }
-                    }
-                }
-                Ok(LastSyncState {
-                    checkpoint: checkpoint__.unwrap_or_default(),
-                    last_synced_at: last_synced_at__,
-                })
-            }
-        }
-        deserializer.deserialize_struct("peerdb_flow.LastSyncState", FIELDS, GeneratedVisitor)
-    }
-}
-impl serde::Serialize for NormalizeFlowOptions {
-    #[allow(deprecated)]
-    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-        let mut len = 0;
-        if self.batch_size != 0 {
-            len += 1;
-        }
-        let mut struct_ser = serializer.serialize_struct("peerdb_flow.NormalizeFlowOptions", len)?;
-        if self.batch_size != 0 {
-            struct_ser.serialize_field("batchSize", &self.batch_size)?;
-        }
-        struct_ser.end()
-    }
-}
-impl<'de> serde::Deserialize<'de> for NormalizeFlowOptions {
-    #[allow(deprecated)]
-    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        const FIELDS: &[&str] = &[
-            "batch_size",
-            "batchSize",
-        ];
-
-        #[allow(clippy::enum_variant_names)]
-        enum GeneratedField {
-            BatchSize,
-            __SkipField__,
-        }
-        impl<'de> serde::Deserialize<'de> for GeneratedField {
-            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
-            where
-                D: serde::Deserializer<'de>,
-            {
-                struct GeneratedVisitor;
-
-                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-                    type Value = GeneratedField;
-
-                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                        write!(formatter, "expected one of: {:?}", &FIELDS)
-                    }
-
-                    #[allow(unused_variables)]
-                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
-                    where
-                        E: serde::de::Error,
-                    {
-                        match value {
-                            "batchSize" | "batch_size" => Ok(GeneratedField::BatchSize),
-                            _ => Ok(GeneratedField::__SkipField__),
-                        }
-                    }
-                }
-                deserializer.deserialize_identifier(GeneratedVisitor)
-            }
-        }
-        struct GeneratedVisitor;
-        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-            type Value = NormalizeFlowOptions;
-
-            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                formatter.write_str("struct peerdb_flow.NormalizeFlowOptions")
-            }
-
-            fn visit_map<V>(self, mut map: V) -> std::result::Result<NormalizeFlowOptions, V::Error>
-                where
-                    V: serde::de::MapAccess<'de>,
-            {
-                let mut batch_size__ = None;
-                while let Some(k) = map.next_key()?
-                {
-                    match k {
-                        GeneratedField::BatchSize => {
-                            if batch_size__.is_some() {
-                                return Err(serde::de::Error::duplicate_field("batchSize"));
-                            }
-                            batch_size__ = 
-                                Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0)
-                            ;
-                        }
-                        GeneratedField::__SkipField__ => {
-                            let _ = map.next_value::<serde::de::IgnoredAny>()?;
-                        }
-                    }
-                }
-                Ok(NormalizeFlowOptions {
-                    batch_size: batch_size__.unwrap_or_default(),
-                })
-            }
-        }
-        deserializer.deserialize_struct("peerdb_flow.NormalizeFlowOptions", FIELDS, GeneratedVisitor)
-    }
-}
-impl serde::Serialize for PartitionRange {
-    #[allow(deprecated)]
-    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-        let mut len = 0;
-        if self.range.is_some() {
-            len += 1;
-        }
-        let mut struct_ser = serializer.serialize_struct("peerdb_flow.PartitionRange", len)?;
-        if let Some(v) = self.range.as_ref() {
-            match v {
-                partition_range::Range::IntRange(v) => {
-                    struct_ser.serialize_field("intRange", v)?;
-                }
-                partition_range::Range::TimestampRange(v) => {
-                    struct_ser.serialize_field("timestampRange", v)?;
-                }
-                partition_range::Range::TidRange(v) => {
-                    struct_ser.serialize_field("tidRange", v)?;
-                }
-            }
-        }
-        struct_ser.end()
-    }
-}
-impl<'de> serde::Deserialize<'de> for PartitionRange {
-    #[allow(deprecated)]
-    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        const FIELDS: &[&str] = &[
-            "int_range",
-            "intRange",
-            "timestamp_range",
-            "timestampRange",
-            "tid_range",
-            "tidRange",
-        ];
-
-        #[allow(clippy::enum_variant_names)]
-        enum GeneratedField {
-            IntRange,
-            TimestampRange,
-            TidRange,
-            __SkipField__,
-        }
-        impl<'de> serde::Deserialize<'de> for GeneratedField {
-            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
-            where
-                D: serde::Deserializer<'de>,
-            {
-                struct GeneratedVisitor;
-
-                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-                    type Value = GeneratedField;
-
-                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                        write!(formatter, "expected one of: {:?}", &FIELDS)
-                    }
-
-                    #[allow(unused_variables)]
-                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
-                    where
-                        E: serde::de::Error,
-                    {
-                        match value {
-                            "intRange" | "int_range" => Ok(GeneratedField::IntRange),
-                            "timestampRange" | "timestamp_range" => Ok(GeneratedField::TimestampRange),
-                            "tidRange" | "tid_range" => Ok(GeneratedField::TidRange),
-                            _ => Ok(GeneratedField::__SkipField__),
-                        }
-                    }
-                }
-                deserializer.deserialize_identifier(GeneratedVisitor)
-            }
-        }
-        struct GeneratedVisitor;
-        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-            type Value = PartitionRange;
-
-            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                formatter.write_str("struct peerdb_flow.PartitionRange")
-            }
-
-            fn visit_map<V>(self, mut map: V) -> std::result::Result<PartitionRange, V::Error>
-                where
-                    V: serde::de::MapAccess<'de>,
-            {
-                let mut range__ = None;
-                while let Some(k) = map.next_key()?
{ - match k { - GeneratedField::IntRange => { - if range__.is_some() { - return Err(serde::de::Error::duplicate_field("intRange")); - } - range__ = map.next_value::<::std::option::Option<_>>()?.map(partition_range::Range::IntRange) -; - } - GeneratedField::TimestampRange => { - if range__.is_some() { - return Err(serde::de::Error::duplicate_field("timestampRange")); - } - range__ = map.next_value::<::std::option::Option<_>>()?.map(partition_range::Range::TimestampRange) -; - } - GeneratedField::TidRange => { - if range__.is_some() { - return Err(serde::de::Error::duplicate_field("tidRange")); - } - range__ = map.next_value::<::std::option::Option<_>>()?.map(partition_range::Range::TidRange) -; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PartitionRange { - range: range__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.PartitionRange", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerDbColumns { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.soft_delete_col_name.is_empty() { - len += 1; - } - if !self.synced_at_col_name.is_empty() { - len += 1; - } - if self.soft_delete { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.PeerDBColumns", len)?; - if !self.soft_delete_col_name.is_empty() { - struct_ser.serialize_field("softDeleteColName", &self.soft_delete_col_name)?; - } - if !self.synced_at_col_name.is_empty() { - struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; - } - if self.soft_delete { - struct_ser.serialize_field("softDelete", &self.soft_delete)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerDbColumns { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "soft_delete_col_name", - "softDeleteColName", - "synced_at_col_name", - "syncedAtColName", - "soft_delete", - "softDelete", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SoftDeleteColName, - SyncedAtColName, - SoftDelete, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), - "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), - "softDelete" | "soft_delete" => Ok(GeneratedField::SoftDelete), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerDbColumns; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.PeerDBColumns") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut 
soft_delete_col_name__ = None; - let mut synced_at_col_name__ = None; - let mut soft_delete__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::SoftDeleteColName => { - if soft_delete_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("softDeleteColName")); - } - soft_delete_col_name__ = Some(map.next_value()?); - } - GeneratedField::SyncedAtColName => { - if synced_at_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("syncedAtColName")); - } - synced_at_col_name__ = Some(map.next_value()?); - } - GeneratedField::SoftDelete => { - if soft_delete__.is_some() { - return Err(serde::de::Error::duplicate_field("softDelete")); - } - soft_delete__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PeerDbColumns { - soft_delete_col_name: soft_delete_col_name__.unwrap_or_default(), - synced_at_col_name: synced_at_col_name__.unwrap_or_default(), - soft_delete: soft_delete__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.PeerDBColumns", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PostgresTableIdentifier { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.rel_id != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.PostgresTableIdentifier", len)?; - if self.rel_id != 0 { - struct_ser.serialize_field("relId", &self.rel_id)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PostgresTableIdentifier { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "rel_id", - "relId", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - RelId, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "relId" | "rel_id" => Ok(GeneratedField::RelId), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PostgresTableIdentifier; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.PostgresTableIdentifier") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut rel_id__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::RelId => { - if rel_id__.is_some() { - return Err(serde::de::Error::duplicate_field("relId")); - } - rel_id__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PostgresTableIdentifier { - rel_id: rel_id__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.PostgresTableIdentifier", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.source_peer.is_some() { - len += 1; - } - if self.destination_peer.is_some() { - len += 1; - } - if !self.destination_table_identifier.is_empty() { - len += 1; - } - if !self.query.is_empty() { - len += 1; - } - if !self.watermark_table.is_empty() { - len += 1; - } - if !self.watermark_column.is_empty() { - len += 1; - } - if self.initial_copy_only { - len += 1; - } - if self.sync_mode != 0 { - len += 1; - } - if self.batch_size_int != 0 { - len += 1; - } - if self.batch_duration_seconds != 0 { - len += 1; - } - if self.max_parallel_workers != 0 { - len += 1; - } - if self.wait_between_batches_seconds != 0 { - len += 1; - } - if self.write_mode.is_some() { - len += 1; - } - if !self.staging_path.is_empty() { - len += 1; - } - if self.num_rows_per_partition != 0 { - len += 1; - } - if self.setup_watermark_table_on_destination { - len += 1; - } - if self.dst_table_full_resync { - len += 1; - } - if !self.synced_at_col_name.is_empty() { - len += 1; - } - if !self.soft_delete_col_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepConfig", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if let Some(v) = self.source_peer.as_ref() { - struct_ser.serialize_field("sourcePeer", v)?; - } - if let Some(v) = self.destination_peer.as_ref() { - struct_ser.serialize_field("destinationPeer", v)?; - } - if !self.destination_table_identifier.is_empty() { - struct_ser.serialize_field("destinationTableIdentifier", &self.destination_table_identifier)?; - } - if !self.query.is_empty() { - struct_ser.serialize_field("query", &self.query)?; - } - if !self.watermark_table.is_empty() { - struct_ser.serialize_field("watermarkTable", &self.watermark_table)?; - } - if !self.watermark_column.is_empty() { - struct_ser.serialize_field("watermarkColumn", &self.watermark_column)?; - } - if self.initial_copy_only { - struct_ser.serialize_field("initialCopyOnly", &self.initial_copy_only)?; - } - if self.sync_mode != 0 { - let v = QRepSyncMode::from_i32(self.sync_mode) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.sync_mode)))?; - struct_ser.serialize_field("syncMode", &v)?; - } - if self.batch_size_int != 0 { - struct_ser.serialize_field("batchSizeInt", &self.batch_size_int)?; - } - if self.batch_duration_seconds != 0 { - struct_ser.serialize_field("batchDurationSeconds", &self.batch_duration_seconds)?; - } - if self.max_parallel_workers != 0 { - struct_ser.serialize_field("maxParallelWorkers", &self.max_parallel_workers)?; - } - if self.wait_between_batches_seconds != 0 { - struct_ser.serialize_field("waitBetweenBatchesSeconds", &self.wait_between_batches_seconds)?; - } - if let Some(v) = self.write_mode.as_ref() { - 
struct_ser.serialize_field("writeMode", v)?; - } - if !self.staging_path.is_empty() { - struct_ser.serialize_field("stagingPath", &self.staging_path)?; - } - if self.num_rows_per_partition != 0 { - struct_ser.serialize_field("numRowsPerPartition", &self.num_rows_per_partition)?; - } - if self.setup_watermark_table_on_destination { - struct_ser.serialize_field("setupWatermarkTableOnDestination", &self.setup_watermark_table_on_destination)?; - } - if self.dst_table_full_resync { - struct_ser.serialize_field("dstTableFullResync", &self.dst_table_full_resync)?; - } - if !self.synced_at_col_name.is_empty() { - struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; - } - if !self.soft_delete_col_name.is_empty() { - struct_ser.serialize_field("softDeleteColName", &self.soft_delete_col_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - "source_peer", - "sourcePeer", - "destination_peer", - "destinationPeer", - "destination_table_identifier", - "destinationTableIdentifier", - "query", - "watermark_table", - "watermarkTable", - "watermark_column", - "watermarkColumn", - "initial_copy_only", - "initialCopyOnly", - "sync_mode", - "syncMode", - "batch_size_int", - "batchSizeInt", - "batch_duration_seconds", - "batchDurationSeconds", - "max_parallel_workers", - "maxParallelWorkers", - "wait_between_batches_seconds", - "waitBetweenBatchesSeconds", - "write_mode", - "writeMode", - "staging_path", - "stagingPath", - "num_rows_per_partition", - "numRowsPerPartition", - "setup_watermark_table_on_destination", - "setupWatermarkTableOnDestination", - "dst_table_full_resync", - "dstTableFullResync", - "synced_at_col_name", - "syncedAtColName", - "soft_delete_col_name", - "softDeleteColName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - SourcePeer, - DestinationPeer, - DestinationTableIdentifier, - Query, - WatermarkTable, - WatermarkColumn, - InitialCopyOnly, - SyncMode, - BatchSizeInt, - BatchDurationSeconds, - MaxParallelWorkers, - WaitBetweenBatchesSeconds, - WriteMode, - StagingPath, - NumRowsPerPartition, - SetupWatermarkTableOnDestination, - DstTableFullResync, - SyncedAtColName, - SoftDeleteColName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "sourcePeer" | "source_peer" => Ok(GeneratedField::SourcePeer), - "destinationPeer" | "destination_peer" => Ok(GeneratedField::DestinationPeer), - "destinationTableIdentifier" | "destination_table_identifier" => Ok(GeneratedField::DestinationTableIdentifier), - "query" => Ok(GeneratedField::Query), - "watermarkTable" | "watermark_table" => Ok(GeneratedField::WatermarkTable), - "watermarkColumn" | "watermark_column" => Ok(GeneratedField::WatermarkColumn), - "initialCopyOnly" | 
"initial_copy_only" => Ok(GeneratedField::InitialCopyOnly), - "syncMode" | "sync_mode" => Ok(GeneratedField::SyncMode), - "batchSizeInt" | "batch_size_int" => Ok(GeneratedField::BatchSizeInt), - "batchDurationSeconds" | "batch_duration_seconds" => Ok(GeneratedField::BatchDurationSeconds), - "maxParallelWorkers" | "max_parallel_workers" => Ok(GeneratedField::MaxParallelWorkers), - "waitBetweenBatchesSeconds" | "wait_between_batches_seconds" => Ok(GeneratedField::WaitBetweenBatchesSeconds), - "writeMode" | "write_mode" => Ok(GeneratedField::WriteMode), - "stagingPath" | "staging_path" => Ok(GeneratedField::StagingPath), - "numRowsPerPartition" | "num_rows_per_partition" => Ok(GeneratedField::NumRowsPerPartition), - "setupWatermarkTableOnDestination" | "setup_watermark_table_on_destination" => Ok(GeneratedField::SetupWatermarkTableOnDestination), - "dstTableFullResync" | "dst_table_full_resync" => Ok(GeneratedField::DstTableFullResync), - "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), - "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - let mut source_peer__ = None; - let mut destination_peer__ = None; - let mut destination_table_identifier__ = None; - let mut query__ = None; - let mut watermark_table__ = None; - let mut watermark_column__ = None; - let mut initial_copy_only__ = None; - let mut sync_mode__ = None; - let mut batch_size_int__ = None; - let mut batch_duration_seconds__ = None; - let mut max_parallel_workers__ = None; - let mut wait_between_batches_seconds__ = None; - let mut write_mode__ = None; - let mut staging_path__ = None; - let mut num_rows_per_partition__ = None; - let mut setup_watermark_table_on_destination__ = None; - let mut dst_table_full_resync__ = None; - let mut synced_at_col_name__ = None; - let mut soft_delete_col_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::SourcePeer => { - if source_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("sourcePeer")); - } - source_peer__ = map.next_value()?; - } - GeneratedField::DestinationPeer => { - if destination_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationPeer")); - } - destination_peer__ = map.next_value()?; - } - GeneratedField::DestinationTableIdentifier => { - if destination_table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationTableIdentifier")); - } - destination_table_identifier__ = Some(map.next_value()?); - } - GeneratedField::Query => { - if query__.is_some() { - return Err(serde::de::Error::duplicate_field("query")); - } - query__ = Some(map.next_value()?); - } - GeneratedField::WatermarkTable => { - if watermark_table__.is_some() { - return Err(serde::de::Error::duplicate_field("watermarkTable")); - } - watermark_table__ = Some(map.next_value()?); - } - GeneratedField::WatermarkColumn => { - if watermark_column__.is_some() { - return Err(serde::de::Error::duplicate_field("watermarkColumn")); - } - watermark_column__ = Some(map.next_value()?); - } - GeneratedField::InitialCopyOnly => { - if initial_copy_only__.is_some() { - return Err(serde::de::Error::duplicate_field("initialCopyOnly")); - } - initial_copy_only__ = Some(map.next_value()?); - } - GeneratedField::SyncMode => { - if sync_mode__.is_some() { - return Err(serde::de::Error::duplicate_field("syncMode")); - } - sync_mode__ = Some(map.next_value::()? as i32); - } - GeneratedField::BatchSizeInt => { - if batch_size_int__.is_some() { - return Err(serde::de::Error::duplicate_field("batchSizeInt")); - } - batch_size_int__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::BatchDurationSeconds => { - if batch_duration_seconds__.is_some() { - return Err(serde::de::Error::duplicate_field("batchDurationSeconds")); - } - batch_duration_seconds__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::MaxParallelWorkers => { - if max_parallel_workers__.is_some() { - return Err(serde::de::Error::duplicate_field("maxParallelWorkers")); - } - max_parallel_workers__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::WaitBetweenBatchesSeconds => { - if wait_between_batches_seconds__.is_some() { - return Err(serde::de::Error::duplicate_field("waitBetweenBatchesSeconds")); - } - wait_between_batches_seconds__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::WriteMode => { - if write_mode__.is_some() { - return Err(serde::de::Error::duplicate_field("writeMode")); - } - write_mode__ = map.next_value()?; - } - GeneratedField::StagingPath => { - if staging_path__.is_some() { - return Err(serde::de::Error::duplicate_field("stagingPath")); - } - staging_path__ = Some(map.next_value()?); - } - GeneratedField::NumRowsPerPartition => { - if num_rows_per_partition__.is_some() { - return Err(serde::de::Error::duplicate_field("numRowsPerPartition")); - } - num_rows_per_partition__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::SetupWatermarkTableOnDestination => { - if setup_watermark_table_on_destination__.is_some() { - return 
Err(serde::de::Error::duplicate_field("setupWatermarkTableOnDestination")); - } - setup_watermark_table_on_destination__ = Some(map.next_value()?); - } - GeneratedField::DstTableFullResync => { - if dst_table_full_resync__.is_some() { - return Err(serde::de::Error::duplicate_field("dstTableFullResync")); - } - dst_table_full_resync__ = Some(map.next_value()?); - } - GeneratedField::SyncedAtColName => { - if synced_at_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("syncedAtColName")); - } - synced_at_col_name__ = Some(map.next_value()?); - } - GeneratedField::SoftDeleteColName => { - if soft_delete_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("softDeleteColName")); - } - soft_delete_col_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepConfig { - flow_job_name: flow_job_name__.unwrap_or_default(), - source_peer: source_peer__, - destination_peer: destination_peer__, - destination_table_identifier: destination_table_identifier__.unwrap_or_default(), - query: query__.unwrap_or_default(), - watermark_table: watermark_table__.unwrap_or_default(), - watermark_column: watermark_column__.unwrap_or_default(), - initial_copy_only: initial_copy_only__.unwrap_or_default(), - sync_mode: sync_mode__.unwrap_or_default(), - batch_size_int: batch_size_int__.unwrap_or_default(), - batch_duration_seconds: batch_duration_seconds__.unwrap_or_default(), - max_parallel_workers: max_parallel_workers__.unwrap_or_default(), - wait_between_batches_seconds: wait_between_batches_seconds__.unwrap_or_default(), - write_mode: write_mode__, - staging_path: staging_path__.unwrap_or_default(), - num_rows_per_partition: num_rows_per_partition__.unwrap_or_default(), - setup_watermark_table_on_destination: setup_watermark_table_on_destination__.unwrap_or_default(), - dst_table_full_resync: dst_table_full_resync__.unwrap_or_default(), - synced_at_col_name: synced_at_col_name__.unwrap_or_default(), - soft_delete_col_name: soft_delete_col_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepFlowState { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.last_partition.is_some() { - len += 1; - } - if self.num_partitions_processed != 0 { - len += 1; - } - if self.needs_resync { - len += 1; - } - if self.disable_wait_for_new_rows { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepFlowState", len)?; - if let Some(v) = self.last_partition.as_ref() { - struct_ser.serialize_field("lastPartition", v)?; - } - if self.num_partitions_processed != 0 { - struct_ser.serialize_field("numPartitionsProcessed", ToString::to_string(&self.num_partitions_processed).as_str())?; - } - if self.needs_resync { - struct_ser.serialize_field("needsResync", &self.needs_resync)?; - } - if self.disable_wait_for_new_rows { - struct_ser.serialize_field("disableWaitForNewRows", &self.disable_wait_for_new_rows)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepFlowState { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "last_partition", - "lastPartition", - "num_partitions_processed", - "numPartitionsProcessed", - "needs_resync", - 
"needsResync", - "disable_wait_for_new_rows", - "disableWaitForNewRows", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - LastPartition, - NumPartitionsProcessed, - NeedsResync, - DisableWaitForNewRows, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "lastPartition" | "last_partition" => Ok(GeneratedField::LastPartition), - "numPartitionsProcessed" | "num_partitions_processed" => Ok(GeneratedField::NumPartitionsProcessed), - "needsResync" | "needs_resync" => Ok(GeneratedField::NeedsResync), - "disableWaitForNewRows" | "disable_wait_for_new_rows" => Ok(GeneratedField::DisableWaitForNewRows), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepFlowState; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepFlowState") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut last_partition__ = None; - let mut num_partitions_processed__ = None; - let mut needs_resync__ = None; - let mut disable_wait_for_new_rows__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::LastPartition => { - if last_partition__.is_some() { - return Err(serde::de::Error::duplicate_field("lastPartition")); - } - last_partition__ = map.next_value()?; - } - GeneratedField::NumPartitionsProcessed => { - if num_partitions_processed__.is_some() { - return Err(serde::de::Error::duplicate_field("numPartitionsProcessed")); - } - num_partitions_processed__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::NeedsResync => { - if needs_resync__.is_some() { - return Err(serde::de::Error::duplicate_field("needsResync")); - } - needs_resync__ = Some(map.next_value()?); - } - GeneratedField::DisableWaitForNewRows => { - if disable_wait_for_new_rows__.is_some() { - return Err(serde::de::Error::duplicate_field("disableWaitForNewRows")); - } - disable_wait_for_new_rows__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepFlowState { - last_partition: last_partition__, - num_partitions_processed: num_partitions_processed__.unwrap_or_default(), - needs_resync: needs_resync__.unwrap_or_default(), - disable_wait_for_new_rows: disable_wait_for_new_rows__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepFlowState", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepParitionResult { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.partitions.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepParitionResult", len)?; - if !self.partitions.is_empty() { - struct_ser.serialize_field("partitions", &self.partitions)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepParitionResult { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "partitions", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Partitions, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "partitions" => Ok(GeneratedField::Partitions), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepParitionResult; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepParitionResult") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut partitions__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Partitions => { - if partitions__.is_some() { - return Err(serde::de::Error::duplicate_field("partitions")); - } - partitions__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepParitionResult { - partitions: partitions__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepParitionResult", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepPartition { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.partition_id.is_empty() { - len += 1; - } - if self.range.is_some() { - len += 1; - } - if self.full_table_partition { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepPartition", len)?; - if !self.partition_id.is_empty() { - struct_ser.serialize_field("partitionId", &self.partition_id)?; - } - if let Some(v) = self.range.as_ref() { - struct_ser.serialize_field("range", v)?; - } - if self.full_table_partition { - struct_ser.serialize_field("fullTablePartition", &self.full_table_partition)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepPartition { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "partition_id", - "partitionId", - "range", - "full_table_partition", - "fullTablePartition", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PartitionId, - Range, - FullTablePartition, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "partitionId" | "partition_id" => Ok(GeneratedField::PartitionId), - "range" => Ok(GeneratedField::Range), - "fullTablePartition" | "full_table_partition" => Ok(GeneratedField::FullTablePartition), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepPartition; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepPartition") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut partition_id__ = None; - let mut range__ = None; - let mut full_table_partition__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PartitionId => { - if partition_id__.is_some() { - return Err(serde::de::Error::duplicate_field("partitionId")); - } - partition_id__ = Some(map.next_value()?); - } - GeneratedField::Range => { - if range__.is_some() { - return Err(serde::de::Error::duplicate_field("range")); - } - range__ = map.next_value()?; - } - GeneratedField::FullTablePartition => { - if full_table_partition__.is_some() { - return Err(serde::de::Error::duplicate_field("fullTablePartition")); - } - full_table_partition__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepPartition { - partition_id: partition_id__.unwrap_or_default(), - range: range__, - full_table_partition: full_table_partition__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepPartition", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepPartitionBatch { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.batch_id != 0 { - len += 1; - } - if !self.partitions.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepPartitionBatch", len)?; - if self.batch_id != 0 { - struct_ser.serialize_field("batchId", &self.batch_id)?; - } - if !self.partitions.is_empty() { - struct_ser.serialize_field("partitions", &self.partitions)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepPartitionBatch { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "batch_id", - "batchId", - "partitions", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - BatchId, - Partitions, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "batchId" | "batch_id" => Ok(GeneratedField::BatchId), - "partitions" => Ok(GeneratedField::Partitions), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepPartitionBatch; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepPartitionBatch") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut batch_id__ = None; - let mut partitions__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::BatchId => { - if batch_id__.is_some() { - return Err(serde::de::Error::duplicate_field("batchId")); - } - batch_id__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::Partitions => { - if partitions__.is_some() { - return Err(serde::de::Error::duplicate_field("partitions")); - } - partitions__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepPartitionBatch { - batch_id: batch_id__.unwrap_or_default(), - partitions: partitions__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepPartitionBatch", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepSyncMode { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::QrepSyncModeMultiInsert => "QREP_SYNC_MODE_MULTI_INSERT", - Self::QrepSyncModeStorageAvro => "QREP_SYNC_MODE_STORAGE_AVRO", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for QRepSyncMode { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "QREP_SYNC_MODE_MULTI_INSERT", - "QREP_SYNC_MODE_STORAGE_AVRO", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepSyncMode; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(QRepSyncMode::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(QRepSyncMode::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "QREP_SYNC_MODE_MULTI_INSERT" => Ok(QRepSyncMode::QrepSyncModeMultiInsert), - "QREP_SYNC_MODE_STORAGE_AVRO" => Ok(QRepSyncMode::QrepSyncModeStorageAvro), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} -impl serde::Serialize for QRepWriteMode { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.write_type != 0 { - len += 1; - } - if !self.upsert_key_columns.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepWriteMode", len)?; - if self.write_type != 0 { - let v = QRepWriteType::from_i32(self.write_type) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.write_type)))?; - struct_ser.serialize_field("writeType", &v)?; - } - if !self.upsert_key_columns.is_empty() { - struct_ser.serialize_field("upsertKeyColumns", &self.upsert_key_columns)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepWriteMode { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - 
{ - const FIELDS: &[&str] = &[ - "write_type", - "writeType", - "upsert_key_columns", - "upsertKeyColumns", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - WriteType, - UpsertKeyColumns, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "writeType" | "write_type" => Ok(GeneratedField::WriteType), - "upsertKeyColumns" | "upsert_key_columns" => Ok(GeneratedField::UpsertKeyColumns), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepWriteMode; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.QRepWriteMode") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut write_type__ = None; - let mut upsert_key_columns__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::WriteType => { - if write_type__.is_some() { - return Err(serde::de::Error::duplicate_field("writeType")); - } - write_type__ = Some(map.next_value::()? as i32); - } - GeneratedField::UpsertKeyColumns => { - if upsert_key_columns__.is_some() { - return Err(serde::de::Error::duplicate_field("upsertKeyColumns")); - } - upsert_key_columns__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepWriteMode { - write_type: write_type__.unwrap_or_default(), - upsert_key_columns: upsert_key_columns__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.QRepWriteMode", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepWriteType { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::QrepWriteModeAppend => "QREP_WRITE_MODE_APPEND", - Self::QrepWriteModeUpsert => "QREP_WRITE_MODE_UPSERT", - Self::QrepWriteModeOverwrite => "QREP_WRITE_MODE_OVERWRITE", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for QRepWriteType { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "QREP_WRITE_MODE_APPEND", - "QREP_WRITE_MODE_UPSERT", - "QREP_WRITE_MODE_OVERWRITE", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepWriteType; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(QRepWriteType::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) 
-> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(QRepWriteType::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "QREP_WRITE_MODE_APPEND" => Ok(QRepWriteType::QrepWriteModeAppend), - "QREP_WRITE_MODE_UPSERT" => Ok(QRepWriteType::QrepWriteModeUpsert), - "QREP_WRITE_MODE_OVERWRITE" => Ok(QRepWriteType::QrepWriteModeOverwrite), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} -impl serde::Serialize for RelationMessage { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.relation_id != 0 { - len += 1; - } - if !self.relation_name.is_empty() { - len += 1; - } - if !self.columns.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.RelationMessage", len)?; - if self.relation_id != 0 { - struct_ser.serialize_field("relationId", &self.relation_id)?; - } - if !self.relation_name.is_empty() { - struct_ser.serialize_field("relationName", &self.relation_name)?; - } - if !self.columns.is_empty() { - struct_ser.serialize_field("columns", &self.columns)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for RelationMessage { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "relation_id", - "relationId", - "relation_name", - "relationName", - "columns", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - RelationId, - RelationName, - Columns, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "relationId" | "relation_id" => Ok(GeneratedField::RelationId), - "relationName" | "relation_name" => Ok(GeneratedField::RelationName), - "columns" => Ok(GeneratedField::Columns), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = RelationMessage; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.RelationMessage") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut relation_id__ = None; - let mut relation_name__ = None; - let mut columns__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::RelationId => { - if relation_id__.is_some() { - return Err(serde::de::Error::duplicate_field("relationId")); - } - relation_id__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::RelationName => { - if relation_name__.is_some() { - return Err(serde::de::Error::duplicate_field("relationName")); - } - relation_name__ = Some(map.next_value()?); - } - GeneratedField::Columns => { - if columns__.is_some() { - return Err(serde::de::Error::duplicate_field("columns")); - } - columns__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(RelationMessage { - relation_id: relation_id__.unwrap_or_default(), - relation_name: relation_name__.unwrap_or_default(), - columns: columns__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.RelationMessage", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for RelationMessageColumn { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.flags != 0 { - len += 1; - } - if !self.name.is_empty() { - len += 1; - } - if self.data_type != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.RelationMessageColumn", len)?; - if self.flags != 0 { - struct_ser.serialize_field("flags", &self.flags)?; - } - if !self.name.is_empty() { - struct_ser.serialize_field("name", &self.name)?; - } - if self.data_type != 0 { - struct_ser.serialize_field("dataType", &self.data_type)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for RelationMessageColumn { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flags", - "name", - "data_type", - "dataType", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Flags, - Name, - DataType, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flags" => Ok(GeneratedField::Flags), - "name" => Ok(GeneratedField::Name), - "dataType" | "data_type" => Ok(GeneratedField::DataType), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = RelationMessageColumn; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.RelationMessageColumn") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flags__ = None; - let mut name__ = None; - let mut data_type__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Flags => { - if flags__.is_some() { - return Err(serde::de::Error::duplicate_field("flags")); - } - flags__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::Name => { - if name__.is_some() { - return Err(serde::de::Error::duplicate_field("name")); - } - name__ = Some(map.next_value()?); - } - GeneratedField::DataType => { - if data_type__.is_some() { - return Err(serde::de::Error::duplicate_field("dataType")); - } - data_type__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(RelationMessageColumn { - flags: flags__.unwrap_or_default(), - name: name__.unwrap_or_default(), - data_type: data_type__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.RelationMessageColumn", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for RenameTableOption { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.current_name.is_empty() { - len += 1; - } - if !self.new_name.is_empty() { - len += 1; - } - if self.table_schema.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.RenameTableOption", len)?; - if !self.current_name.is_empty() { - struct_ser.serialize_field("currentName", &self.current_name)?; - } - if !self.new_name.is_empty() { - struct_ser.serialize_field("newName", &self.new_name)?; - } - if let Some(v) = self.table_schema.as_ref() { - struct_ser.serialize_field("tableSchema", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for RenameTableOption { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "current_name", - "currentName", - "new_name", - "newName", - "table_schema", - "tableSchema", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - CurrentName, - NewName, - TableSchema, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "currentName" | "current_name" => Ok(GeneratedField::CurrentName), - "newName" | "new_name" => Ok(GeneratedField::NewName), - "tableSchema" | "table_schema" => Ok(GeneratedField::TableSchema), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = RenameTableOption; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.RenameTableOption") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut current_name__ = None; - let mut new_name__ = None; - let mut table_schema__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::CurrentName => { - if current_name__.is_some() { - return Err(serde::de::Error::duplicate_field("currentName")); - } - current_name__ = Some(map.next_value()?); - } - GeneratedField::NewName => { - if new_name__.is_some() { - return Err(serde::de::Error::duplicate_field("newName")); - } - new_name__ = Some(map.next_value()?); - } - GeneratedField::TableSchema => { - if table_schema__.is_some() { - return Err(serde::de::Error::duplicate_field("tableSchema")); - } - table_schema__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(RenameTableOption { - current_name: current_name__.unwrap_or_default(), - new_name: new_name__.unwrap_or_default(), - table_schema: table_schema__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.RenameTableOption", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for RenameTablesInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.peer.is_some() { - len += 1; - } - if !self.rename_table_options.is_empty() { - len += 1; - } - if self.soft_delete_col_name.is_some() { - len += 1; - } - if self.synced_at_col_name.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.RenameTablesInput", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if let Some(v) = self.peer.as_ref() { - struct_ser.serialize_field("peer", v)?; - } - if !self.rename_table_options.is_empty() { - struct_ser.serialize_field("renameTableOptions", &self.rename_table_options)?; - } - if let Some(v) = self.soft_delete_col_name.as_ref() { - struct_ser.serialize_field("softDeleteColName", v)?; - } - if let Some(v) = self.synced_at_col_name.as_ref() { - struct_ser.serialize_field("syncedAtColName", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for RenameTablesInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - "peer", - "rename_table_options", - "renameTableOptions", - "soft_delete_col_name", - "softDeleteColName", - "synced_at_col_name", - "syncedAtColName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - Peer, - RenameTableOptions, - SoftDeleteColName, - SyncedAtColName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "peer" => Ok(GeneratedField::Peer), - "renameTableOptions" | "rename_table_options" => Ok(GeneratedField::RenameTableOptions), - "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), - "syncedAtColName" | "synced_at_col_name" => 
Ok(GeneratedField::SyncedAtColName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = RenameTablesInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.RenameTablesInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - let mut peer__ = None; - let mut rename_table_options__ = None; - let mut soft_delete_col_name__ = None; - let mut synced_at_col_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::Peer => { - if peer__.is_some() { - return Err(serde::de::Error::duplicate_field("peer")); - } - peer__ = map.next_value()?; - } - GeneratedField::RenameTableOptions => { - if rename_table_options__.is_some() { - return Err(serde::de::Error::duplicate_field("renameTableOptions")); - } - rename_table_options__ = Some(map.next_value()?); - } - GeneratedField::SoftDeleteColName => { - if soft_delete_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("softDeleteColName")); - } - soft_delete_col_name__ = map.next_value()?; - } - GeneratedField::SyncedAtColName => { - if synced_at_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("syncedAtColName")); - } - synced_at_col_name__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(RenameTablesInput { - flow_job_name: flow_job_name__.unwrap_or_default(), - peer: peer__, - rename_table_options: rename_table_options__.unwrap_or_default(), - soft_delete_col_name: soft_delete_col_name__, - synced_at_col_name: synced_at_col_name__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.RenameTablesInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for RenameTablesOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.RenameTablesOutput", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for RenameTablesOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - 
{ - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = RenameTablesOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.RenameTablesOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(RenameTablesOutput { - flow_job_name: flow_job_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.RenameTablesOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ReplayTableSchemaDeltaInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.flow_connection_configs.is_some() { - len += 1; - } - if !self.table_schema_deltas.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.ReplayTableSchemaDeltaInput", len)?; - if let Some(v) = self.flow_connection_configs.as_ref() { - struct_ser.serialize_field("flowConnectionConfigs", v)?; - } - if !self.table_schema_deltas.is_empty() { - struct_ser.serialize_field("tableSchemaDeltas", &self.table_schema_deltas)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ReplayTableSchemaDeltaInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_connection_configs", - "flowConnectionConfigs", - "table_schema_deltas", - "tableSchemaDeltas", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowConnectionConfigs, - TableSchemaDeltas, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowConnectionConfigs" | "flow_connection_configs" => Ok(GeneratedField::FlowConnectionConfigs), - "tableSchemaDeltas" | "table_schema_deltas" => Ok(GeneratedField::TableSchemaDeltas), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ReplayTableSchemaDeltaInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.ReplayTableSchemaDeltaInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: 
serde::de::MapAccess<'de>, - { - let mut flow_connection_configs__ = None; - let mut table_schema_deltas__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::FlowConnectionConfigs => { - if flow_connection_configs__.is_some() { - return Err(serde::de::Error::duplicate_field("flowConnectionConfigs")); - } - flow_connection_configs__ = map.next_value()?; - } - GeneratedField::TableSchemaDeltas => { - if table_schema_deltas__.is_some() { - return Err(serde::de::Error::duplicate_field("tableSchemaDeltas")); - } - table_schema_deltas__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(ReplayTableSchemaDeltaInput { - flow_connection_configs: flow_connection_configs__, - table_schema_deltas: table_schema_deltas__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.ReplayTableSchemaDeltaInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer.is_some() { - len += 1; - } - if !self.flow_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupInput", len)?; - if let Some(v) = self.peer.as_ref() { - struct_ser.serialize_field("peer", v)?; - } - if !self.flow_name.is_empty() { - struct_ser.serialize_field("flowName", &self.flow_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer", - "flow_name", - "flowName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Peer, - FlowName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peer" => Ok(GeneratedField::Peer), - "flowName" | "flow_name" => Ok(GeneratedField::FlowName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer__ = None; - let mut flow_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Peer => { - if peer__.is_some() { - return Err(serde::de::Error::duplicate_field("peer")); - } - peer__ = map.next_value()?; - } - GeneratedField::FlowName => { - if flow_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowName")); - } - flow_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupInput { - peer: peer__, - flow_name: flow_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupNormalizedTableBatchInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.table_name_schema_mapping.is_empty() { - len += 1; - } - if !self.soft_delete_col_name.is_empty() { - len += 1; - } - if !self.synced_at_col_name.is_empty() { - len += 1; - } - if !self.flow_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupNormalizedTableBatchInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.table_name_schema_mapping.is_empty() { - struct_ser.serialize_field("tableNameSchemaMapping", &self.table_name_schema_mapping)?; - } - if !self.soft_delete_col_name.is_empty() { - struct_ser.serialize_field("softDeleteColName", &self.soft_delete_col_name)?; - } - if !self.synced_at_col_name.is_empty() { - struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; - } - if !self.flow_name.is_empty() { - struct_ser.serialize_field("flowName", &self.flow_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupNormalizedTableBatchInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "table_name_schema_mapping", - "tableNameSchemaMapping", - "soft_delete_col_name", - "softDeleteColName", - "synced_at_col_name", - "syncedAtColName", - "flow_name", - "flowName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - TableNameSchemaMapping, - SoftDeleteColName, - SyncedAtColName, - FlowName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "tableNameSchemaMapping" | "table_name_schema_mapping" => Ok(GeneratedField::TableNameSchemaMapping), - "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), - "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), - "flowName" | "flow_name" => Ok(GeneratedField::FlowName), - _ => 
Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupNormalizedTableBatchInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupNormalizedTableBatchInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut table_name_schema_mapping__ = None; - let mut soft_delete_col_name__ = None; - let mut synced_at_col_name__ = None; - let mut flow_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::TableNameSchemaMapping => { - if table_name_schema_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableNameSchemaMapping")); - } - table_name_schema_mapping__ = Some( - map.next_value::>()? - ); - } - GeneratedField::SoftDeleteColName => { - if soft_delete_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("softDeleteColName")); - } - soft_delete_col_name__ = Some(map.next_value()?); - } - GeneratedField::SyncedAtColName => { - if synced_at_col_name__.is_some() { - return Err(serde::de::Error::duplicate_field("syncedAtColName")); - } - synced_at_col_name__ = Some(map.next_value()?); - } - GeneratedField::FlowName => { - if flow_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowName")); - } - flow_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupNormalizedTableBatchInput { - peer_connection_config: peer_connection_config__, - table_name_schema_mapping: table_name_schema_mapping__.unwrap_or_default(), - soft_delete_col_name: soft_delete_col_name__.unwrap_or_default(), - synced_at_col_name: synced_at_col_name__.unwrap_or_default(), - flow_name: flow_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupNormalizedTableBatchInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupNormalizedTableBatchOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_exists_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupNormalizedTableBatchOutput", len)?; - if !self.table_exists_mapping.is_empty() { - struct_ser.serialize_field("tableExistsMapping", &self.table_exists_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupNormalizedTableBatchOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_exists_mapping", - "tableExistsMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableExistsMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type 
Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableExistsMapping" | "table_exists_mapping" => Ok(GeneratedField::TableExistsMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupNormalizedTableBatchOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupNormalizedTableBatchOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_exists_mapping__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableExistsMapping => { - if table_exists_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableExistsMapping")); - } - table_exists_mapping__ = Some( - map.next_value::>()? - ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupNormalizedTableBatchOutput { - table_exists_mapping: table_exists_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupNormalizedTableBatchOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupNormalizedTableInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.table_identifier.is_empty() { - len += 1; - } - if self.source_table_schema.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupNormalizedTableInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.table_identifier.is_empty() { - struct_ser.serialize_field("tableIdentifier", &self.table_identifier)?; - } - if let Some(v) = self.source_table_schema.as_ref() { - struct_ser.serialize_field("sourceTableSchema", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupNormalizedTableInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "table_identifier", - "tableIdentifier", - "source_table_schema", - "sourceTableSchema", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - TableIdentifier, - SourceTableSchema, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | 
"peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "tableIdentifier" | "table_identifier" => Ok(GeneratedField::TableIdentifier), - "sourceTableSchema" | "source_table_schema" => Ok(GeneratedField::SourceTableSchema), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupNormalizedTableInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupNormalizedTableInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut table_identifier__ = None; - let mut source_table_schema__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::TableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifier")); - } - table_identifier__ = Some(map.next_value()?); - } - GeneratedField::SourceTableSchema => { - if source_table_schema__.is_some() { - return Err(serde::de::Error::duplicate_field("sourceTableSchema")); - } - source_table_schema__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupNormalizedTableInput { - peer_connection_config: peer_connection_config__, - table_identifier: table_identifier__.unwrap_or_default(), - source_table_schema: source_table_schema__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupNormalizedTableInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupNormalizedTableOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_identifier.is_empty() { - len += 1; - } - if self.already_exists { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupNormalizedTableOutput", len)?; - if !self.table_identifier.is_empty() { - struct_ser.serialize_field("tableIdentifier", &self.table_identifier)?; - } - if self.already_exists { - struct_ser.serialize_field("alreadyExists", &self.already_exists)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupNormalizedTableOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_identifier", - "tableIdentifier", - "already_exists", - "alreadyExists", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableIdentifier, - AlreadyExists, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: 
serde::de::Error, - { - match value { - "tableIdentifier" | "table_identifier" => Ok(GeneratedField::TableIdentifier), - "alreadyExists" | "already_exists" => Ok(GeneratedField::AlreadyExists), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupNormalizedTableOutput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupNormalizedTableOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier__ = None; - let mut already_exists__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifier")); - } - table_identifier__ = Some(map.next_value()?); - } - GeneratedField::AlreadyExists => { - if already_exists__.is_some() { - return Err(serde::de::Error::duplicate_field("alreadyExists")); - } - already_exists__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupNormalizedTableOutput { - table_identifier: table_identifier__.unwrap_or_default(), - already_exists: already_exists__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupNormalizedTableOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupReplicationInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer_connection_config.is_some() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if !self.table_name_mapping.is_empty() { - len += 1; - } - if self.destination_peer.is_some() { - len += 1; - } - if self.do_initial_copy { - len += 1; - } - if !self.existing_publication_name.is_empty() { - len += 1; - } - if !self.existing_replication_slot_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupReplicationInput", len)?; - if let Some(v) = self.peer_connection_config.as_ref() { - struct_ser.serialize_field("peerConnectionConfig", v)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if !self.table_name_mapping.is_empty() { - struct_ser.serialize_field("tableNameMapping", &self.table_name_mapping)?; - } - if let Some(v) = self.destination_peer.as_ref() { - struct_ser.serialize_field("destinationPeer", v)?; - } - if self.do_initial_copy { - struct_ser.serialize_field("doInitialCopy", &self.do_initial_copy)?; - } - if !self.existing_publication_name.is_empty() { - struct_ser.serialize_field("existingPublicationName", &self.existing_publication_name)?; - } - if !self.existing_replication_slot_name.is_empty() { - struct_ser.serialize_field("existingReplicationSlotName", &self.existing_replication_slot_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupReplicationInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_connection_config", - "peerConnectionConfig", - "flow_job_name", - "flowJobName", - "table_name_mapping", - "tableNameMapping", - 
"destination_peer", - "destinationPeer", - "do_initial_copy", - "doInitialCopy", - "existing_publication_name", - "existingPublicationName", - "existing_replication_slot_name", - "existingReplicationSlotName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerConnectionConfig, - FlowJobName, - TableNameMapping, - DestinationPeer, - DoInitialCopy, - ExistingPublicationName, - ExistingReplicationSlotName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerConnectionConfig" | "peer_connection_config" => Ok(GeneratedField::PeerConnectionConfig), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "tableNameMapping" | "table_name_mapping" => Ok(GeneratedField::TableNameMapping), - "destinationPeer" | "destination_peer" => Ok(GeneratedField::DestinationPeer), - "doInitialCopy" | "do_initial_copy" => Ok(GeneratedField::DoInitialCopy), - "existingPublicationName" | "existing_publication_name" => Ok(GeneratedField::ExistingPublicationName), - "existingReplicationSlotName" | "existing_replication_slot_name" => Ok(GeneratedField::ExistingReplicationSlotName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupReplicationInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupReplicationInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_connection_config__ = None; - let mut flow_job_name__ = None; - let mut table_name_mapping__ = None; - let mut destination_peer__ = None; - let mut do_initial_copy__ = None; - let mut existing_publication_name__ = None; - let mut existing_replication_slot_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerConnectionConfig => { - if peer_connection_config__.is_some() { - return Err(serde::de::Error::duplicate_field("peerConnectionConfig")); - } - peer_connection_config__ = map.next_value()?; - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::TableNameMapping => { - if table_name_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("tableNameMapping")); - } - table_name_mapping__ = Some( - map.next_value::>()? 
- ); - } - GeneratedField::DestinationPeer => { - if destination_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationPeer")); - } - destination_peer__ = map.next_value()?; - } - GeneratedField::DoInitialCopy => { - if do_initial_copy__.is_some() { - return Err(serde::de::Error::duplicate_field("doInitialCopy")); - } - do_initial_copy__ = Some(map.next_value()?); - } - GeneratedField::ExistingPublicationName => { - if existing_publication_name__.is_some() { - return Err(serde::de::Error::duplicate_field("existingPublicationName")); - } - existing_publication_name__ = Some(map.next_value()?); - } - GeneratedField::ExistingReplicationSlotName => { - if existing_replication_slot_name__.is_some() { - return Err(serde::de::Error::duplicate_field("existingReplicationSlotName")); - } - existing_replication_slot_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupReplicationInput { - peer_connection_config: peer_connection_config__, - flow_job_name: flow_job_name__.unwrap_or_default(), - table_name_mapping: table_name_mapping__.unwrap_or_default(), - destination_peer: destination_peer__, - do_initial_copy: do_initial_copy__.unwrap_or_default(), - existing_publication_name: existing_publication_name__.unwrap_or_default(), - existing_replication_slot_name: existing_replication_slot_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupReplicationInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SetupReplicationOutput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.slot_name.is_empty() { - len += 1; - } - if !self.snapshot_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SetupReplicationOutput", len)?; - if !self.slot_name.is_empty() { - struct_ser.serialize_field("slotName", &self.slot_name)?; - } - if !self.snapshot_name.is_empty() { - struct_ser.serialize_field("snapshotName", &self.snapshot_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SetupReplicationOutput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "slot_name", - "slotName", - "snapshot_name", - "snapshotName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SlotName, - SnapshotName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "slotName" | "slot_name" => Ok(GeneratedField::SlotName), - "snapshotName" | "snapshot_name" => Ok(GeneratedField::SnapshotName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SetupReplicationOutput; - - fn 
expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SetupReplicationOutput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut slot_name__ = None; - let mut snapshot_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::SlotName => { - if slot_name__.is_some() { - return Err(serde::de::Error::duplicate_field("slotName")); - } - slot_name__ = Some(map.next_value()?); - } - GeneratedField::SnapshotName => { - if snapshot_name__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotName")); - } - snapshot_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SetupReplicationOutput { - slot_name: slot_name__.unwrap_or_default(), - snapshot_name: snapshot_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SetupReplicationOutput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for StartFlowInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.last_sync_state.is_some() { - len += 1; - } - if self.flow_connection_configs.is_some() { - len += 1; - } - if self.sync_flow_options.is_some() { - len += 1; - } - if !self.relation_message_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.StartFlowInput", len)?; - if let Some(v) = self.last_sync_state.as_ref() { - struct_ser.serialize_field("lastSyncState", v)?; - } - if let Some(v) = self.flow_connection_configs.as_ref() { - struct_ser.serialize_field("flowConnectionConfigs", v)?; - } - if let Some(v) = self.sync_flow_options.as_ref() { - struct_ser.serialize_field("syncFlowOptions", v)?; - } - if !self.relation_message_mapping.is_empty() { - struct_ser.serialize_field("relationMessageMapping", &self.relation_message_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for StartFlowInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "last_sync_state", - "lastSyncState", - "flow_connection_configs", - "flowConnectionConfigs", - "sync_flow_options", - "syncFlowOptions", - "relation_message_mapping", - "relationMessageMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - LastSyncState, - FlowConnectionConfigs, - SyncFlowOptions, - RelationMessageMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "lastSyncState" | "last_sync_state" => Ok(GeneratedField::LastSyncState), - "flowConnectionConfigs" | "flow_connection_configs" => Ok(GeneratedField::FlowConnectionConfigs), - "syncFlowOptions" | "sync_flow_options" => Ok(GeneratedField::SyncFlowOptions), - "relationMessageMapping" | 
"relation_message_mapping" => Ok(GeneratedField::RelationMessageMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = StartFlowInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.StartFlowInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut last_sync_state__ = None; - let mut flow_connection_configs__ = None; - let mut sync_flow_options__ = None; - let mut relation_message_mapping__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::LastSyncState => { - if last_sync_state__.is_some() { - return Err(serde::de::Error::duplicate_field("lastSyncState")); - } - last_sync_state__ = map.next_value()?; - } - GeneratedField::FlowConnectionConfigs => { - if flow_connection_configs__.is_some() { - return Err(serde::de::Error::duplicate_field("flowConnectionConfigs")); - } - flow_connection_configs__ = map.next_value()?; - } - GeneratedField::SyncFlowOptions => { - if sync_flow_options__.is_some() { - return Err(serde::de::Error::duplicate_field("syncFlowOptions")); - } - sync_flow_options__ = map.next_value()?; - } - GeneratedField::RelationMessageMapping => { - if relation_message_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("relationMessageMapping")); - } - relation_message_mapping__ = Some( - map.next_value::, _>>()? - .into_iter().map(|(k,v)| (k.0, v)).collect() - ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(StartFlowInput { - last_sync_state: last_sync_state__, - flow_connection_configs: flow_connection_configs__, - sync_flow_options: sync_flow_options__, - relation_message_mapping: relation_message_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.StartFlowInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for StartNormalizeInput { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.flow_connection_configs.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.StartNormalizeInput", len)?; - if let Some(v) = self.flow_connection_configs.as_ref() { - struct_ser.serialize_field("flowConnectionConfigs", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for StartNormalizeInput { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_connection_configs", - "flowConnectionConfigs", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowConnectionConfigs, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match 
value { - "flowConnectionConfigs" | "flow_connection_configs" => Ok(GeneratedField::FlowConnectionConfigs), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = StartNormalizeInput; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.StartNormalizeInput") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_connection_configs__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::FlowConnectionConfigs => { - if flow_connection_configs__.is_some() { - return Err(serde::de::Error::duplicate_field("flowConnectionConfigs")); - } - flow_connection_configs__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(StartNormalizeInput { - flow_connection_configs: flow_connection_configs__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.StartNormalizeInput", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SyncFlowOptions { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.batch_size != 0 { - len += 1; - } - if !self.relation_message_mapping.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.SyncFlowOptions", len)?; - if self.batch_size != 0 { - struct_ser.serialize_field("batchSize", &self.batch_size)?; - } - if !self.relation_message_mapping.is_empty() { - struct_ser.serialize_field("relationMessageMapping", &self.relation_message_mapping)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SyncFlowOptions { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "batch_size", - "batchSize", - "relation_message_mapping", - "relationMessageMapping", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - BatchSize, - RelationMessageMapping, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "batchSize" | "batch_size" => Ok(GeneratedField::BatchSize), - "relationMessageMapping" | "relation_message_mapping" => Ok(GeneratedField::RelationMessageMapping), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SyncFlowOptions; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.SyncFlowOptions") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut batch_size__ = None; - let 
mut relation_message_mapping__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::BatchSize => { - if batch_size__.is_some() { - return Err(serde::de::Error::duplicate_field("batchSize")); - } - batch_size__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::RelationMessageMapping => { - if relation_message_mapping__.is_some() { - return Err(serde::de::Error::duplicate_field("relationMessageMapping")); - } - relation_message_mapping__ = Some( - map.next_value::, _>>()? - .into_iter().map(|(k,v)| (k.0, v)).collect() - ); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SyncFlowOptions { - batch_size: batch_size__.unwrap_or_default(), - relation_message_mapping: relation_message_mapping__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.SyncFlowOptions", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for Tid { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.block_number != 0 { - len += 1; - } - if self.offset_number != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TID", len)?; - if self.block_number != 0 { - struct_ser.serialize_field("blockNumber", &self.block_number)?; - } - if self.offset_number != 0 { - struct_ser.serialize_field("offsetNumber", &self.offset_number)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for Tid { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "block_number", - "blockNumber", - "offset_number", - "offsetNumber", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - BlockNumber, - OffsetNumber, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "blockNumber" | "block_number" => Ok(GeneratedField::BlockNumber), - "offsetNumber" | "offset_number" => Ok(GeneratedField::OffsetNumber), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = Tid; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TID") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut block_number__ = None; - let mut offset_number__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::BlockNumber => { - if block_number__.is_some() { - return Err(serde::de::Error::duplicate_field("blockNumber")); - } - block_number__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::OffsetNumber => { - if offset_number__.is_some() { - return Err(serde::de::Error::duplicate_field("offsetNumber")); - } - offset_number__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(Tid { - block_number: block_number__.unwrap_or_default(), - offset_number: offset_number__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TID", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TidPartitionRange { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.start.is_some() { - len += 1; - } - if self.end.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TIDPartitionRange", len)?; - if let Some(v) = self.start.as_ref() { - struct_ser.serialize_field("start", v)?; - } - if let Some(v) = self.end.as_ref() { - struct_ser.serialize_field("end", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TidPartitionRange { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "start", - "end", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Start, - End, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "start" => Ok(GeneratedField::Start), - "end" => Ok(GeneratedField::End), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TidPartitionRange; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TIDPartitionRange") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut start__ = None; - let mut end__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Start => { - if start__.is_some() { - return Err(serde::de::Error::duplicate_field("start")); - } - start__ = map.next_value()?; - } - GeneratedField::End => { - if end__.is_some() { - return Err(serde::de::Error::duplicate_field("end")); - } - end__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TidPartitionRange { - start: start__, - end: end__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TIDPartitionRange", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableIdentifier { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.table_identifier.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TableIdentifier", len)?; - if let Some(v) = self.table_identifier.as_ref() { - match v { - table_identifier::TableIdentifier::PostgresTableIdentifier(v) => { - struct_ser.serialize_field("postgresTableIdentifier", v)?; - } - } - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableIdentifier { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "postgres_table_identifier", - "postgresTableIdentifier", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PostgresTableIdentifier, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "postgresTableIdentifier" | "postgres_table_identifier" => Ok(GeneratedField::PostgresTableIdentifier), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableIdentifier; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TableIdentifier") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PostgresTableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("postgresTableIdentifier")); - } - table_identifier__ = map.next_value::<::std::option::Option<_>>()?.map(table_identifier::TableIdentifier::PostgresTableIdentifier) -; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableIdentifier { - table_identifier: table_identifier__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TableIdentifier", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableMapping { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.source_table_identifier.is_empty() { - len += 1; - } - if !self.destination_table_identifier.is_empty() { - len += 1; - } - if !self.partition_key.is_empty() { - len += 1; - } - if !self.exclude.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TableMapping", len)?; - if !self.source_table_identifier.is_empty() { - struct_ser.serialize_field("sourceTableIdentifier", &self.source_table_identifier)?; - } - if !self.destination_table_identifier.is_empty() { - struct_ser.serialize_field("destinationTableIdentifier", &self.destination_table_identifier)?; - } - if !self.partition_key.is_empty() { - struct_ser.serialize_field("partitionKey", &self.partition_key)?; - } - if !self.exclude.is_empty() { - struct_ser.serialize_field("exclude", &self.exclude)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableMapping { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "source_table_identifier", - "sourceTableIdentifier", - "destination_table_identifier", - "destinationTableIdentifier", - "partition_key", - "partitionKey", - "exclude", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SourceTableIdentifier, - DestinationTableIdentifier, - PartitionKey, - Exclude, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "sourceTableIdentifier" | "source_table_identifier" => Ok(GeneratedField::SourceTableIdentifier), - "destinationTableIdentifier" | "destination_table_identifier" => Ok(GeneratedField::DestinationTableIdentifier), - "partitionKey" | "partition_key" => Ok(GeneratedField::PartitionKey), - "exclude" => Ok(GeneratedField::Exclude), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableMapping; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TableMapping") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { 
- let mut source_table_identifier__ = None; - let mut destination_table_identifier__ = None; - let mut partition_key__ = None; - let mut exclude__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::SourceTableIdentifier => { - if source_table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("sourceTableIdentifier")); - } - source_table_identifier__ = Some(map.next_value()?); - } - GeneratedField::DestinationTableIdentifier => { - if destination_table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationTableIdentifier")); - } - destination_table_identifier__ = Some(map.next_value()?); - } - GeneratedField::PartitionKey => { - if partition_key__.is_some() { - return Err(serde::de::Error::duplicate_field("partitionKey")); - } - partition_key__ = Some(map.next_value()?); - } - GeneratedField::Exclude => { - if exclude__.is_some() { - return Err(serde::de::Error::duplicate_field("exclude")); - } - exclude__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableMapping { - source_table_identifier: source_table_identifier__.unwrap_or_default(), - destination_table_identifier: destination_table_identifier__.unwrap_or_default(), - partition_key: partition_key__.unwrap_or_default(), - exclude: exclude__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TableMapping", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableNameMapping { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.source_table_name.is_empty() { - len += 1; - } - if !self.destination_table_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TableNameMapping", len)?; - if !self.source_table_name.is_empty() { - struct_ser.serialize_field("sourceTableName", &self.source_table_name)?; - } - if !self.destination_table_name.is_empty() { - struct_ser.serialize_field("destinationTableName", &self.destination_table_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableNameMapping { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "source_table_name", - "sourceTableName", - "destination_table_name", - "destinationTableName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SourceTableName, - DestinationTableName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "sourceTableName" | "source_table_name" => Ok(GeneratedField::SourceTableName), - "destinationTableName" | "destination_table_name" => Ok(GeneratedField::DestinationTableName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> 
serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableNameMapping; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TableNameMapping") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut source_table_name__ = None; - let mut destination_table_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::SourceTableName => { - if source_table_name__.is_some() { - return Err(serde::de::Error::duplicate_field("sourceTableName")); - } - source_table_name__ = Some(map.next_value()?); - } - GeneratedField::DestinationTableName => { - if destination_table_name__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationTableName")); - } - destination_table_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableNameMapping { - source_table_name: source_table_name__.unwrap_or_default(), - destination_table_name: destination_table_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TableNameMapping", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableSchema { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.table_identifier.is_empty() { - len += 1; - } - if !self.columns.is_empty() { - len += 1; - } - if !self.primary_key_columns.is_empty() { - len += 1; - } - if self.is_replica_identity_full { - len += 1; - } - if !self.column_names.is_empty() { - len += 1; - } - if !self.column_types.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TableSchema", len)?; - if !self.table_identifier.is_empty() { - struct_ser.serialize_field("tableIdentifier", &self.table_identifier)?; - } - if !self.columns.is_empty() { - struct_ser.serialize_field("columns", &self.columns)?; - } - if !self.primary_key_columns.is_empty() { - struct_ser.serialize_field("primaryKeyColumns", &self.primary_key_columns)?; - } - if self.is_replica_identity_full { - struct_ser.serialize_field("isReplicaIdentityFull", &self.is_replica_identity_full)?; - } - if !self.column_names.is_empty() { - struct_ser.serialize_field("columnNames", &self.column_names)?; - } - if !self.column_types.is_empty() { - struct_ser.serialize_field("columnTypes", &self.column_types)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableSchema { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "table_identifier", - "tableIdentifier", - "columns", - "primary_key_columns", - "primaryKeyColumns", - "is_replica_identity_full", - "isReplicaIdentityFull", - "column_names", - "columnNames", - "column_types", - "columnTypes", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - TableIdentifier, - Columns, - PrimaryKeyColumns, - IsReplicaIdentityFull, - ColumnNames, - ColumnTypes, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tableIdentifier" | "table_identifier" => Ok(GeneratedField::TableIdentifier), - "columns" => Ok(GeneratedField::Columns), - "primaryKeyColumns" | "primary_key_columns" => Ok(GeneratedField::PrimaryKeyColumns), - "isReplicaIdentityFull" | "is_replica_identity_full" => Ok(GeneratedField::IsReplicaIdentityFull), - "columnNames" | "column_names" => Ok(GeneratedField::ColumnNames), - "columnTypes" | "column_types" => Ok(GeneratedField::ColumnTypes), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableSchema; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TableSchema") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut table_identifier__ = None; - let mut columns__ = None; - let mut primary_key_columns__ = None; - let mut is_replica_identity_full__ = None; - let mut column_names__ = None; - let mut column_types__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::TableIdentifier => { - if table_identifier__.is_some() { - return Err(serde::de::Error::duplicate_field("tableIdentifier")); - } - table_identifier__ = Some(map.next_value()?); - } - GeneratedField::Columns => { - if columns__.is_some() { - return Err(serde::de::Error::duplicate_field("columns")); - } - columns__ = Some( - map.next_value::>()? 
- ); - } - GeneratedField::PrimaryKeyColumns => { - if primary_key_columns__.is_some() { - return Err(serde::de::Error::duplicate_field("primaryKeyColumns")); - } - primary_key_columns__ = Some(map.next_value()?); - } - GeneratedField::IsReplicaIdentityFull => { - if is_replica_identity_full__.is_some() { - return Err(serde::de::Error::duplicate_field("isReplicaIdentityFull")); - } - is_replica_identity_full__ = Some(map.next_value()?); - } - GeneratedField::ColumnNames => { - if column_names__.is_some() { - return Err(serde::de::Error::duplicate_field("columnNames")); - } - column_names__ = Some(map.next_value()?); - } - GeneratedField::ColumnTypes => { - if column_types__.is_some() { - return Err(serde::de::Error::duplicate_field("columnTypes")); - } - column_types__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableSchema { - table_identifier: table_identifier__.unwrap_or_default(), - columns: columns__.unwrap_or_default(), - primary_key_columns: primary_key_columns__.unwrap_or_default(), - is_replica_identity_full: is_replica_identity_full__.unwrap_or_default(), - column_names: column_names__.unwrap_or_default(), - column_types: column_types__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TableSchema", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableSchemaDelta { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.src_table_name.is_empty() { - len += 1; - } - if !self.dst_table_name.is_empty() { - len += 1; - } - if !self.added_columns.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TableSchemaDelta", len)?; - if !self.src_table_name.is_empty() { - struct_ser.serialize_field("srcTableName", &self.src_table_name)?; - } - if !self.dst_table_name.is_empty() { - struct_ser.serialize_field("dstTableName", &self.dst_table_name)?; - } - if !self.added_columns.is_empty() { - struct_ser.serialize_field("addedColumns", &self.added_columns)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableSchemaDelta { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "src_table_name", - "srcTableName", - "dst_table_name", - "dstTableName", - "added_columns", - "addedColumns", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SrcTableName, - DstTableName, - AddedColumns, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "srcTableName" | "src_table_name" => Ok(GeneratedField::SrcTableName), - "dstTableName" | "dst_table_name" => Ok(GeneratedField::DstTableName), - "addedColumns" | "added_columns" => Ok(GeneratedField::AddedColumns), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - 
} - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableSchemaDelta; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TableSchemaDelta") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut src_table_name__ = None; - let mut dst_table_name__ = None; - let mut added_columns__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::SrcTableName => { - if src_table_name__.is_some() { - return Err(serde::de::Error::duplicate_field("srcTableName")); - } - src_table_name__ = Some(map.next_value()?); - } - GeneratedField::DstTableName => { - if dst_table_name__.is_some() { - return Err(serde::de::Error::duplicate_field("dstTableName")); - } - dst_table_name__ = Some(map.next_value()?); - } - GeneratedField::AddedColumns => { - if added_columns__.is_some() { - return Err(serde::de::Error::duplicate_field("addedColumns")); - } - added_columns__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableSchemaDelta { - src_table_name: src_table_name__.unwrap_or_default(), - dst_table_name: dst_table_name__.unwrap_or_default(), - added_columns: added_columns__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TableSchemaDelta", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TimestampPartitionRange { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.start.is_some() { - len += 1; - } - if self.end.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_flow.TimestampPartitionRange", len)?; - if let Some(v) = self.start.as_ref() { - struct_ser.serialize_field("start", v)?; - } - if let Some(v) = self.end.as_ref() { - struct_ser.serialize_field("end", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TimestampPartitionRange { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "start", - "end", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Start, - End, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "start" => Ok(GeneratedField::Start), - "end" => Ok(GeneratedField::End), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TimestampPartitionRange; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_flow.TimestampPartitionRange") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: 
serde::de::MapAccess<'de>, - { - let mut start__ = None; - let mut end__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Start => { - if start__.is_some() { - return Err(serde::de::Error::duplicate_field("start")); - } - start__ = map.next_value()?; - } - GeneratedField::End => { - if end__.is_some() { - return Err(serde::de::Error::duplicate_field("end")); - } - end__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TimestampPartitionRange { - start: start__, - end: end__, - }) - } - } - deserializer.deserialize_struct("peerdb_flow.TimestampPartitionRange", FIELDS, GeneratedVisitor) - } -} diff --git a/nexus/pt/src/peerdb_peers.rs b/nexus/pt/src/peerdb_peers.rs deleted file mode 100644 index 8266eea158..0000000000 --- a/nexus/pt/src/peerdb_peers.rs +++ /dev/null @@ -1,245 +0,0 @@ -// @generated -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SshConfig { - #[prost(string, tag="1")] - pub host: ::prost::alloc::string::String, - #[prost(uint32, tag="2")] - pub port: u32, - #[prost(string, tag="3")] - pub user: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub password: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub private_key: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SnowflakeConfig { - #[prost(string, tag="1")] - pub account_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub username: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub private_key: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub database: ::prost::alloc::string::String, - #[prost(string, tag="6")] - pub warehouse: ::prost::alloc::string::String, - #[prost(string, tag="7")] - pub role: ::prost::alloc::string::String, - #[prost(uint64, tag="8")] - pub query_timeout: u64, - #[prost(string, tag="9")] - pub s3_integration: ::prost::alloc::string::String, - #[prost(string, optional, tag="10")] - pub password: ::core::option::Option<::prost::alloc::string::String>, - /// defaults to _PEERDB_INTERNAL - #[prost(string, optional, tag="11")] - pub metadata_schema: ::core::option::Option<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BigqueryConfig { - #[prost(string, tag="1")] - pub auth_type: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub project_id: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub private_key_id: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub private_key: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub client_email: ::prost::alloc::string::String, - #[prost(string, tag="6")] - pub client_id: ::prost::alloc::string::String, - #[prost(string, tag="7")] - pub auth_uri: ::prost::alloc::string::String, - #[prost(string, tag="8")] - pub token_uri: ::prost::alloc::string::String, - #[prost(string, tag="9")] - pub auth_provider_x509_cert_url: ::prost::alloc::string::String, - #[prost(string, tag="10")] - pub client_x509_cert_url: ::prost::alloc::string::String, - #[prost(string, tag="11")] - pub dataset_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MongoConfig { - #[prost(string, tag="1")] - pub username: 
::prost::alloc::string::String, - #[prost(string, tag="2")] - pub password: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub clusterurl: ::prost::alloc::string::String, - #[prost(int32, tag="4")] - pub clusterport: i32, - #[prost(string, tag="5")] - pub database: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PostgresConfig { - #[prost(string, tag="1")] - pub host: ::prost::alloc::string::String, - #[prost(uint32, tag="2")] - pub port: u32, - #[prost(string, tag="3")] - pub user: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub password: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub database: ::prost::alloc::string::String, - /// this is used only in query replication mode right now. - #[prost(string, tag="6")] - pub transaction_snapshot: ::prost::alloc::string::String, - /// defaults to _peerdb_internal - #[prost(string, optional, tag="7")] - pub metadata_schema: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag="8")] - pub ssh_config: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventHubConfig { - #[prost(string, tag="1")] - pub namespace: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub resource_group: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub location: ::prost::alloc::string::String, - #[prost(message, optional, tag="4")] - pub metadata_db: ::core::option::Option, - /// if this is empty PeerDB uses `AZURE_SUBSCRIPTION_ID` environment variable. - #[prost(string, tag="5")] - pub subscription_id: ::prost::alloc::string::String, - /// defaults to 3 - #[prost(uint32, tag="6")] - pub partition_count: u32, - /// defaults to 7 - #[prost(uint32, tag="7")] - pub message_retention_in_days: u32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventHubGroupConfig { - /// event hub peer name to event hub config - #[prost(map="string, message", tag="1")] - pub eventhubs: ::std::collections::HashMap<::prost::alloc::string::String, EventHubConfig>, - #[prost(message, optional, tag="2")] - pub metadata_db: ::core::option::Option, - #[prost(string, repeated, tag="3")] - pub unnest_columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct S3Config { - #[prost(string, tag="1")] - pub url: ::prost::alloc::string::String, - #[prost(string, optional, tag="2")] - pub access_key_id: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag="3")] - pub secret_access_key: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag="4")] - pub role_arn: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag="5")] - pub region: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag="6")] - pub endpoint: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag="7")] - pub metadata_db: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SqlServerConfig { - #[prost(string, tag="1")] - pub server: ::prost::alloc::string::String, - #[prost(uint32, tag="2")] - pub port: u32, - #[prost(string, 
tag="3")] - pub user: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub password: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub database: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Peer { - #[prost(string, tag="1")] - pub name: ::prost::alloc::string::String, - #[prost(enumeration="DbType", tag="2")] - pub r#type: i32, - #[prost(oneof="peer::Config", tags="3, 4, 5, 6, 7, 8, 9, 10")] - pub config: ::core::option::Option, -} -/// Nested message and enum types in `Peer`. -pub mod peer { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Config { - #[prost(message, tag="3")] - SnowflakeConfig(super::SnowflakeConfig), - #[prost(message, tag="4")] - BigqueryConfig(super::BigqueryConfig), - #[prost(message, tag="5")] - MongoConfig(super::MongoConfig), - #[prost(message, tag="6")] - PostgresConfig(super::PostgresConfig), - #[prost(message, tag="7")] - EventhubConfig(super::EventHubConfig), - #[prost(message, tag="8")] - S3Config(super::S3Config), - #[prost(message, tag="9")] - SqlserverConfig(super::SqlServerConfig), - #[prost(message, tag="10")] - EventhubGroupConfig(super::EventHubGroupConfig), - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum DbType { - Bigquery = 0, - Snowflake = 1, - Mongo = 2, - Postgres = 3, - Eventhub = 4, - S3 = 5, - Sqlserver = 6, - EventhubGroup = 7, -} -impl DbType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - DbType::Bigquery => "BIGQUERY", - DbType::Snowflake => "SNOWFLAKE", - DbType::Mongo => "MONGO", - DbType::Postgres => "POSTGRES", - DbType::Eventhub => "EVENTHUB", - DbType::S3 => "S3", - DbType::Sqlserver => "SQLSERVER", - DbType::EventhubGroup => "EVENTHUB_GROUP", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "BIGQUERY" => Some(Self::Bigquery),
-            "SNOWFLAKE" => Some(Self::Snowflake),
-            "MONGO" => Some(Self::Mongo),
-            "POSTGRES" => Some(Self::Postgres),
-            "EVENTHUB" => Some(Self::Eventhub),
-            "S3" => Some(Self::S3),
-            "SQLSERVER" => Some(Self::Sqlserver),
-            "EVENTHUB_GROUP" => Some(Self::EventhubGroup),
-            _ => None,
-        }
-    }
-}
-include!("peerdb_peers.serde.rs");
-// @@protoc_insertion_point(module)
\ No newline at end of file
diff --git a/nexus/pt/src/peerdb_peers.serde.rs b/nexus/pt/src/peerdb_peers.serde.rs
deleted file mode 100644
index 18c206865e..0000000000
--- a/nexus/pt/src/peerdb_peers.serde.rs
+++ /dev/null
@@ -1,2113 +0,0 @@
-// @generated
-impl serde::Serialize for BigqueryConfig {
-    #[allow(deprecated)]
-    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-        let mut len = 0;
-        if !self.auth_type.is_empty() {
-            len += 1;
-        }
-        if !self.project_id.is_empty() {
-            len += 1;
-        }
-        if !self.private_key_id.is_empty() {
-            len += 1;
-        }
-        if !self.private_key.is_empty() {
-            len += 1;
-        }
-        if !self.client_email.is_empty() {
-            len += 1;
-        }
-        if !self.client_id.is_empty() {
-            len += 1;
-        }
-        if !self.auth_uri.is_empty() {
-            len += 1;
-        }
-        if !self.token_uri.is_empty() {
-            len += 1;
-        }
-        if !self.auth_provider_x509_cert_url.is_empty() {
-            len += 1;
-        }
-        if !self.client_x509_cert_url.is_empty() {
-            len += 1;
-        }
-        if !self.dataset_id.is_empty() {
-            len += 1;
-        }
-        let mut struct_ser = serializer.serialize_struct("peerdb_peers.BigqueryConfig", len)?;
-        if !self.auth_type.is_empty() {
-            struct_ser.serialize_field("authType", &self.auth_type)?;
-        }
-        if !self.project_id.is_empty() {
-            struct_ser.serialize_field("projectId", &self.project_id)?;
-        }
-        if !self.private_key_id.is_empty() {
-            struct_ser.serialize_field("privateKeyId", &self.private_key_id)?;
-        }
-        if !self.private_key.is_empty() {
-            struct_ser.serialize_field("privateKey", &self.private_key)?;
-        }
-        if !self.client_email.is_empty() {
-            struct_ser.serialize_field("clientEmail", &self.client_email)?;
-        }
-        if !self.client_id.is_empty() {
-            struct_ser.serialize_field("clientId", &self.client_id)?;
-        }
-        if !self.auth_uri.is_empty() {
-            struct_ser.serialize_field("authUri", &self.auth_uri)?;
-        }
-        if !self.token_uri.is_empty() {
-            struct_ser.serialize_field("tokenUri", &self.token_uri)?;
-        }
-        if !self.auth_provider_x509_cert_url.is_empty() {
-            struct_ser.serialize_field("authProviderX509CertUrl", &self.auth_provider_x509_cert_url)?;
-        }
-        if !self.client_x509_cert_url.is_empty() {
-            struct_ser.serialize_field("clientX509CertUrl", &self.client_x509_cert_url)?;
-        }
-        if !self.dataset_id.is_empty() {
-            struct_ser.serialize_field("datasetId", &self.dataset_id)?;
-        }
-        struct_ser.end()
-    }
-}
-impl<'de> serde::Deserialize<'de> for BigqueryConfig {
-    #[allow(deprecated)]
-    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        const FIELDS: &[&str] = &[
-            "auth_type",
-            "authType",
-            "project_id",
-            "projectId",
-            "private_key_id",
-            "privateKeyId",
-            "private_key",
-            "privateKey",
-            "client_email",
-            "clientEmail",
-            "client_id",
-            "clientId",
-            "auth_uri",
-            "authUri",
-            "token_uri",
-            "tokenUri",
-            "auth_provider_x509_cert_url",
-            "authProviderX509CertUrl",
-            "client_x509_cert_url",
-            "clientX509CertUrl",
-            "dataset_id",
-            "datasetId",
-        ];
-
-        #[allow(clippy::enum_variant_names)]
-        enum GeneratedField {
-            AuthType,
-            ProjectId,
PrivateKeyId, - PrivateKey, - ClientEmail, - ClientId, - AuthUri, - TokenUri, - AuthProviderX509CertUrl, - ClientX509CertUrl, - DatasetId, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "authType" | "auth_type" => Ok(GeneratedField::AuthType), - "projectId" | "project_id" => Ok(GeneratedField::ProjectId), - "privateKeyId" | "private_key_id" => Ok(GeneratedField::PrivateKeyId), - "privateKey" | "private_key" => Ok(GeneratedField::PrivateKey), - "clientEmail" | "client_email" => Ok(GeneratedField::ClientEmail), - "clientId" | "client_id" => Ok(GeneratedField::ClientId), - "authUri" | "auth_uri" => Ok(GeneratedField::AuthUri), - "tokenUri" | "token_uri" => Ok(GeneratedField::TokenUri), - "authProviderX509CertUrl" | "auth_provider_x509_cert_url" => Ok(GeneratedField::AuthProviderX509CertUrl), - "clientX509CertUrl" | "client_x509_cert_url" => Ok(GeneratedField::ClientX509CertUrl), - "datasetId" | "dataset_id" => Ok(GeneratedField::DatasetId), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = BigqueryConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.BigqueryConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut auth_type__ = None; - let mut project_id__ = None; - let mut private_key_id__ = None; - let mut private_key__ = None; - let mut client_email__ = None; - let mut client_id__ = None; - let mut auth_uri__ = None; - let mut token_uri__ = None; - let mut auth_provider_x509_cert_url__ = None; - let mut client_x509_cert_url__ = None; - let mut dataset_id__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::AuthType => { - if auth_type__.is_some() { - return Err(serde::de::Error::duplicate_field("authType")); - } - auth_type__ = Some(map.next_value()?); - } - GeneratedField::ProjectId => { - if project_id__.is_some() { - return Err(serde::de::Error::duplicate_field("projectId")); - } - project_id__ = Some(map.next_value()?); - } - GeneratedField::PrivateKeyId => { - if private_key_id__.is_some() { - return Err(serde::de::Error::duplicate_field("privateKeyId")); - } - private_key_id__ = Some(map.next_value()?); - } - GeneratedField::PrivateKey => { - if private_key__.is_some() { - return Err(serde::de::Error::duplicate_field("privateKey")); - } - private_key__ = Some(map.next_value()?); - } - GeneratedField::ClientEmail => { - if client_email__.is_some() { - return Err(serde::de::Error::duplicate_field("clientEmail")); - } - client_email__ = Some(map.next_value()?); - } - GeneratedField::ClientId => { - if client_id__.is_some() { - return Err(serde::de::Error::duplicate_field("clientId")); - } - client_id__ = Some(map.next_value()?); - } - GeneratedField::AuthUri => { - if auth_uri__.is_some() { - return Err(serde::de::Error::duplicate_field("authUri")); - } - auth_uri__ = Some(map.next_value()?); - } - GeneratedField::TokenUri => { - if token_uri__.is_some() { - return Err(serde::de::Error::duplicate_field("tokenUri")); - } - token_uri__ = Some(map.next_value()?); - } - GeneratedField::AuthProviderX509CertUrl => { - if auth_provider_x509_cert_url__.is_some() { - return Err(serde::de::Error::duplicate_field("authProviderX509CertUrl")); - } - auth_provider_x509_cert_url__ = Some(map.next_value()?); - } - GeneratedField::ClientX509CertUrl => { - if client_x509_cert_url__.is_some() { - return Err(serde::de::Error::duplicate_field("clientX509CertUrl")); - } - client_x509_cert_url__ = Some(map.next_value()?); - } - GeneratedField::DatasetId => { - if dataset_id__.is_some() { - return Err(serde::de::Error::duplicate_field("datasetId")); - } - dataset_id__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(BigqueryConfig { - auth_type: auth_type__.unwrap_or_default(), - project_id: project_id__.unwrap_or_default(), - private_key_id: private_key_id__.unwrap_or_default(), - private_key: private_key__.unwrap_or_default(), - client_email: client_email__.unwrap_or_default(), - client_id: client_id__.unwrap_or_default(), - auth_uri: auth_uri__.unwrap_or_default(), - token_uri: token_uri__.unwrap_or_default(), - auth_provider_x509_cert_url: auth_provider_x509_cert_url__.unwrap_or_default(), - client_x509_cert_url: client_x509_cert_url__.unwrap_or_default(), - dataset_id: dataset_id__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.BigqueryConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for DbType { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::Bigquery => "BIGQUERY", - Self::Snowflake => "SNOWFLAKE", - Self::Mongo => "MONGO", - Self::Postgres => "POSTGRES", - Self::Eventhub => "EVENTHUB", - Self::S3 => "S3", - Self::Sqlserver => "SQLSERVER", - Self::EventhubGroup => "EVENTHUB_GROUP", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for DbType { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "BIGQUERY", - 
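
The pbjson-generated impls deleted here implement the proto3 JSON mapping: fields at their default value are skipped on output, keys are camelCase, and either camelCase or snake_case spellings are accepted on input. A small round-trip sketch under the same assumed `pt::peerdb_peers` path, with `serde_json` as an assumed dev-dependency:

// A round-trip sketch, assuming the `pt::peerdb_peers` path and serde_json.
use pt::peerdb_peers::BigqueryConfig;

fn main() -> Result<(), serde_json::Error> {
    let cfg = BigqueryConfig {
        project_id: "my-project".to_string(),
        dataset_id: "my_dataset".to_string(),
        ..Default::default()
    };

    // Default (empty) fields are skipped and keys use the proto3 JSON camelCase names.
    let json = serde_json::to_string(&cfg)?;
    assert_eq!(json, r#"{"projectId":"my-project","datasetId":"my_dataset"}"#);

    // On input, both camelCase and snake_case spellings are accepted.
    let back: BigqueryConfig =
        serde_json::from_str(r#"{"project_id":"my-project","dataset_id":"my_dataset"}"#)?;
    assert_eq!(back.project_id, "my-project");
    Ok(())
}
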
"SNOWFLAKE", - "MONGO", - "POSTGRES", - "EVENTHUB", - "S3", - "SQLSERVER", - "EVENTHUB_GROUP", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = DbType; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(DbType::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(DbType::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "BIGQUERY" => Ok(DbType::Bigquery), - "SNOWFLAKE" => Ok(DbType::Snowflake), - "MONGO" => Ok(DbType::Mongo), - "POSTGRES" => Ok(DbType::Postgres), - "EVENTHUB" => Ok(DbType::Eventhub), - "S3" => Ok(DbType::S3), - "SQLSERVER" => Ok(DbType::Sqlserver), - "EVENTHUB_GROUP" => Ok(DbType::EventhubGroup), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} -impl serde::Serialize for EventHubConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.namespace.is_empty() { - len += 1; - } - if !self.resource_group.is_empty() { - len += 1; - } - if !self.location.is_empty() { - len += 1; - } - if self.metadata_db.is_some() { - len += 1; - } - if !self.subscription_id.is_empty() { - len += 1; - } - if self.partition_count != 0 { - len += 1; - } - if self.message_retention_in_days != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.EventHubConfig", len)?; - if !self.namespace.is_empty() { - struct_ser.serialize_field("namespace", &self.namespace)?; - } - if !self.resource_group.is_empty() { - struct_ser.serialize_field("resourceGroup", &self.resource_group)?; - } - if !self.location.is_empty() { - struct_ser.serialize_field("location", &self.location)?; - } - if let Some(v) = self.metadata_db.as_ref() { - struct_ser.serialize_field("metadataDb", v)?; - } - if !self.subscription_id.is_empty() { - struct_ser.serialize_field("subscriptionId", &self.subscription_id)?; - } - if self.partition_count != 0 { - struct_ser.serialize_field("partitionCount", &self.partition_count)?; - } - if self.message_retention_in_days != 0 { - struct_ser.serialize_field("messageRetentionInDays", &self.message_retention_in_days)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EventHubConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "namespace", - "resource_group", - "resourceGroup", - "location", - "metadata_db", - "metadataDb", - "subscription_id", - "subscriptionId", - "partition_count", - "partitionCount", - "message_retention_in_days", - "messageRetentionInDays", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Namespace, - ResourceGroup, - Location, - MetadataDb, - SubscriptionId, - PartitionCount, - 
MessageRetentionInDays, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "namespace" => Ok(GeneratedField::Namespace), - "resourceGroup" | "resource_group" => Ok(GeneratedField::ResourceGroup), - "location" => Ok(GeneratedField::Location), - "metadataDb" | "metadata_db" => Ok(GeneratedField::MetadataDb), - "subscriptionId" | "subscription_id" => Ok(GeneratedField::SubscriptionId), - "partitionCount" | "partition_count" => Ok(GeneratedField::PartitionCount), - "messageRetentionInDays" | "message_retention_in_days" => Ok(GeneratedField::MessageRetentionInDays), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EventHubConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.EventHubConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut namespace__ = None; - let mut resource_group__ = None; - let mut location__ = None; - let mut metadata_db__ = None; - let mut subscription_id__ = None; - let mut partition_count__ = None; - let mut message_retention_in_days__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Namespace => { - if namespace__.is_some() { - return Err(serde::de::Error::duplicate_field("namespace")); - } - namespace__ = Some(map.next_value()?); - } - GeneratedField::ResourceGroup => { - if resource_group__.is_some() { - return Err(serde::de::Error::duplicate_field("resourceGroup")); - } - resource_group__ = Some(map.next_value()?); - } - GeneratedField::Location => { - if location__.is_some() { - return Err(serde::de::Error::duplicate_field("location")); - } - location__ = Some(map.next_value()?); - } - GeneratedField::MetadataDb => { - if metadata_db__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataDb")); - } - metadata_db__ = map.next_value()?; - } - GeneratedField::SubscriptionId => { - if subscription_id__.is_some() { - return Err(serde::de::Error::duplicate_field("subscriptionId")); - } - subscription_id__ = Some(map.next_value()?); - } - GeneratedField::PartitionCount => { - if partition_count__.is_some() { - return Err(serde::de::Error::duplicate_field("partitionCount")); - } - partition_count__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::MessageRetentionInDays => { - if message_retention_in_days__.is_some() { - return Err(serde::de::Error::duplicate_field("messageRetentionInDays")); - } - message_retention_in_days__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EventHubConfig { - namespace: namespace__.unwrap_or_default(), - resource_group: resource_group__.unwrap_or_default(), - location: location__.unwrap_or_default(), - metadata_db: metadata_db__, - subscription_id: subscription_id__.unwrap_or_default(), - partition_count: partition_count__.unwrap_or_default(), - message_retention_in_days: message_retention_in_days__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.EventHubConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for EventHubGroupConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.eventhubs.is_empty() { - len += 1; - } - if self.metadata_db.is_some() { - len += 1; - } - if !self.unnest_columns.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.EventHubGroupConfig", len)?; - if !self.eventhubs.is_empty() { - struct_ser.serialize_field("eventhubs", &self.eventhubs)?; - } - if let Some(v) = self.metadata_db.as_ref() { - struct_ser.serialize_field("metadataDb", v)?; - } - if !self.unnest_columns.is_empty() { - struct_ser.serialize_field("unnestColumns", &self.unnest_columns)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for EventHubGroupConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "eventhubs", - "metadata_db", - "metadataDb", - "unnest_columns", - "unnestColumns", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Eventhubs, - MetadataDb, - UnnestColumns, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn 
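
Decoding is deliberately lenient: unknown keys fall into the generated `__SkipField__` arm and are drained, while absent fields keep their proto defaults. A hedged example against `EventHubConfig`, again assuming the `pt::peerdb_peers` path:

// A sketch of lenient decoding.
use pt::peerdb_peers::EventHubConfig;

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{
        "namespace": "my-namespace",
        "resourceGroup": "my-rg",
        "location": "eastus",
        "partitionCount": 4,
        "someFutureField": true
    }"#;

    // "someFutureField" falls into the __SkipField__ arm and is ignored;
    // fields that are absent keep their proto defaults.
    let cfg: EventHubConfig = serde_json::from_str(json)?;
    assert_eq!(cfg.partition_count, 4);
    assert_eq!(cfg.message_retention_in_days, 0);
    Ok(())
}
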
expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "eventhubs" => Ok(GeneratedField::Eventhubs), - "metadataDb" | "metadata_db" => Ok(GeneratedField::MetadataDb), - "unnestColumns" | "unnest_columns" => Ok(GeneratedField::UnnestColumns), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = EventHubGroupConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.EventHubGroupConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut eventhubs__ = None; - let mut metadata_db__ = None; - let mut unnest_columns__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Eventhubs => { - if eventhubs__.is_some() { - return Err(serde::de::Error::duplicate_field("eventhubs")); - } - eventhubs__ = Some( - map.next_value::>()? - ); - } - GeneratedField::MetadataDb => { - if metadata_db__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataDb")); - } - metadata_db__ = map.next_value()?; - } - GeneratedField::UnnestColumns => { - if unnest_columns__.is_some() { - return Err(serde::de::Error::duplicate_field("unnestColumns")); - } - unnest_columns__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(EventHubGroupConfig { - eventhubs: eventhubs__.unwrap_or_default(), - metadata_db: metadata_db__, - unnest_columns: unnest_columns__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.EventHubGroupConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for MongoConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.username.is_empty() { - len += 1; - } - if !self.password.is_empty() { - len += 1; - } - if !self.clusterurl.is_empty() { - len += 1; - } - if self.clusterport != 0 { - len += 1; - } - if !self.database.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.MongoConfig", len)?; - if !self.username.is_empty() { - struct_ser.serialize_field("username", &self.username)?; - } - if !self.password.is_empty() { - struct_ser.serialize_field("password", &self.password)?; - } - if !self.clusterurl.is_empty() { - struct_ser.serialize_field("clusterurl", &self.clusterurl)?; - } - if self.clusterport != 0 { - struct_ser.serialize_field("clusterport", &self.clusterport)?; - } - if !self.database.is_empty() { - struct_ser.serialize_field("database", &self.database)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for MongoConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "username", - "password", - "clusterurl", - "clusterport", - "database", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Username, - Password, - Clusterurl, - Clusterport, - Database, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for 
GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "username" => Ok(GeneratedField::Username), - "password" => Ok(GeneratedField::Password), - "clusterurl" => Ok(GeneratedField::Clusterurl), - "clusterport" => Ok(GeneratedField::Clusterport), - "database" => Ok(GeneratedField::Database), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = MongoConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.MongoConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut username__ = None; - let mut password__ = None; - let mut clusterurl__ = None; - let mut clusterport__ = None; - let mut database__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Username => { - if username__.is_some() { - return Err(serde::de::Error::duplicate_field("username")); - } - username__ = Some(map.next_value()?); - } - GeneratedField::Password => { - if password__.is_some() { - return Err(serde::de::Error::duplicate_field("password")); - } - password__ = Some(map.next_value()?); - } - GeneratedField::Clusterurl => { - if clusterurl__.is_some() { - return Err(serde::de::Error::duplicate_field("clusterurl")); - } - clusterurl__ = Some(map.next_value()?); - } - GeneratedField::Clusterport => { - if clusterport__.is_some() { - return Err(serde::de::Error::duplicate_field("clusterport")); - } - clusterport__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::Database => { - if database__.is_some() { - return Err(serde::de::Error::duplicate_field("database")); - } - database__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(MongoConfig { - username: username__.unwrap_or_default(), - password: password__.unwrap_or_default(), - clusterurl: clusterurl__.unwrap_or_default(), - clusterport: clusterport__.unwrap_or_default(), - database: database__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.MongoConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for Peer { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.name.is_empty() { - len += 1; - } - if self.r#type != 0 { - len += 1; - } - if self.config.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.Peer", len)?; - if !self.name.is_empty() { - struct_ser.serialize_field("name", &self.name)?; - } - if self.r#type != 0 { - let v = DbType::from_i32(self.r#type) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.r#type)))?; - struct_ser.serialize_field("type", &v)?; - } - if let Some(v) = self.config.as_ref() { - match v { - 
peer::Config::SnowflakeConfig(v) => { - struct_ser.serialize_field("snowflakeConfig", v)?; - } - peer::Config::BigqueryConfig(v) => { - struct_ser.serialize_field("bigqueryConfig", v)?; - } - peer::Config::MongoConfig(v) => { - struct_ser.serialize_field("mongoConfig", v)?; - } - peer::Config::PostgresConfig(v) => { - struct_ser.serialize_field("postgresConfig", v)?; - } - peer::Config::EventhubConfig(v) => { - struct_ser.serialize_field("eventhubConfig", v)?; - } - peer::Config::S3Config(v) => { - struct_ser.serialize_field("s3Config", v)?; - } - peer::Config::SqlserverConfig(v) => { - struct_ser.serialize_field("sqlserverConfig", v)?; - } - peer::Config::EventhubGroupConfig(v) => { - struct_ser.serialize_field("eventhubGroupConfig", v)?; - } - } - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for Peer { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "name", - "type", - "snowflake_config", - "snowflakeConfig", - "bigquery_config", - "bigqueryConfig", - "mongo_config", - "mongoConfig", - "postgres_config", - "postgresConfig", - "eventhub_config", - "eventhubConfig", - "s3_config", - "s3Config", - "sqlserver_config", - "sqlserverConfig", - "eventhub_group_config", - "eventhubGroupConfig", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Name, - Type, - SnowflakeConfig, - BigqueryConfig, - MongoConfig, - PostgresConfig, - EventhubConfig, - S3Config, - SqlserverConfig, - EventhubGroupConfig, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "name" => Ok(GeneratedField::Name), - "type" => Ok(GeneratedField::Type), - "snowflakeConfig" | "snowflake_config" => Ok(GeneratedField::SnowflakeConfig), - "bigqueryConfig" | "bigquery_config" => Ok(GeneratedField::BigqueryConfig), - "mongoConfig" | "mongo_config" => Ok(GeneratedField::MongoConfig), - "postgresConfig" | "postgres_config" => Ok(GeneratedField::PostgresConfig), - "eventhubConfig" | "eventhub_config" => Ok(GeneratedField::EventhubConfig), - "s3Config" | "s3_config" => Ok(GeneratedField::S3Config), - "sqlserverConfig" | "sqlserver_config" => Ok(GeneratedField::SqlserverConfig), - "eventhubGroupConfig" | "eventhub_group_config" => Ok(GeneratedField::EventhubGroupConfig), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = Peer; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.Peer") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut name__ = None; - let mut r#type__ = None; - let mut config__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Name => { - if name__.is_some() { - return Err(serde::de::Error::duplicate_field("name")); - } - name__ = Some(map.next_value()?); - } - GeneratedField::Type => { - if r#type__.is_some() { - return Err(serde::de::Error::duplicate_field("type")); - } - r#type__ = Some(map.next_value::()? as i32); - } - GeneratedField::SnowflakeConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("snowflakeConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::SnowflakeConfig) -; - } - GeneratedField::BigqueryConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("bigqueryConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::BigqueryConfig) -; - } - GeneratedField::MongoConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("mongoConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::MongoConfig) -; - } - GeneratedField::PostgresConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("postgresConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::PostgresConfig) -; - } - GeneratedField::EventhubConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("eventhubConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::EventhubConfig) -; - } - GeneratedField::S3Config => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("s3Config")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::S3Config) -; - } - GeneratedField::SqlserverConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("sqlserverConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::SqlserverConfig) -; - } - GeneratedField::EventhubGroupConfig => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("eventhubGroupConfig")); - } - config__ = map.next_value::<::std::option::Option<_>>()?.map(peer::Config::EventhubGroupConfig) -; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(Peer { - name: name__.unwrap_or_default(), - r#type: r#type__.unwrap_or_default(), - config: config__, - }) - } - } - deserializer.deserialize_struct("peerdb_peers.Peer", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PostgresConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.host.is_empty() { - len += 1; - } - if self.port != 0 { - len += 1; - } - if !self.user.is_empty() { - len += 1; - } - if !self.password.is_empty() { - len += 1; - } - if !self.database.is_empty() { - len += 1; - } - if !self.transaction_snapshot.is_empty() { - len += 1; - } - if self.metadata_schema.is_some() { - len += 1; - } - if self.ssh_config.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.PostgresConfig", len)?; - if !self.host.is_empty() { - struct_ser.serialize_field("host", &self.host)?; - } - if self.port != 0 { - struct_ser.serialize_field("port", &self.port)?; - } - if !self.user.is_empty() { - struct_ser.serialize_field("user", &self.user)?; - } - if !self.password.is_empty() { - struct_ser.serialize_field("password", &self.password)?; - } - if 
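
For `Peer`, the deserializer maps each `*Config` key onto the corresponding `peer::Config` variant, accepts the enum's string name for `type`, and rejects objects that set more than one oneof variant. A sketch under the same assumptions:

// A sketch of the oneof mapping, assuming the `pt::peerdb_peers` path.
use pt::peerdb_peers::{peer, DbType, Peer};

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{
        "name": "pg_peer",
        "type": "POSTGRES",
        "postgresConfig": { "host": "localhost", "port": 5432, "database": "postgres" }
    }"#;

    let p: Peer = serde_json::from_str(json)?;
    assert_eq!(p.r#type, DbType::Postgres as i32);
    assert!(matches!(p.config, Some(peer::Config::PostgresConfig(_))));

    // Two oneof variants in the same object trip the duplicate-field check.
    let bad = r#"{ "name": "x", "postgresConfig": {}, "mongoConfig": {} }"#;
    assert!(serde_json::from_str::<Peer>(bad).is_err());
    Ok(())
}
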
!self.database.is_empty() { - struct_ser.serialize_field("database", &self.database)?; - } - if !self.transaction_snapshot.is_empty() { - struct_ser.serialize_field("transactionSnapshot", &self.transaction_snapshot)?; - } - if let Some(v) = self.metadata_schema.as_ref() { - struct_ser.serialize_field("metadataSchema", v)?; - } - if let Some(v) = self.ssh_config.as_ref() { - struct_ser.serialize_field("sshConfig", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PostgresConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "host", - "port", - "user", - "password", - "database", - "transaction_snapshot", - "transactionSnapshot", - "metadata_schema", - "metadataSchema", - "ssh_config", - "sshConfig", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Host, - Port, - User, - Password, - Database, - TransactionSnapshot, - MetadataSchema, - SshConfig, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "host" => Ok(GeneratedField::Host), - "port" => Ok(GeneratedField::Port), - "user" => Ok(GeneratedField::User), - "password" => Ok(GeneratedField::Password), - "database" => Ok(GeneratedField::Database), - "transactionSnapshot" | "transaction_snapshot" => Ok(GeneratedField::TransactionSnapshot), - "metadataSchema" | "metadata_schema" => Ok(GeneratedField::MetadataSchema), - "sshConfig" | "ssh_config" => Ok(GeneratedField::SshConfig), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PostgresConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.PostgresConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut host__ = None; - let mut port__ = None; - let mut user__ = None; - let mut password__ = None; - let mut database__ = None; - let mut transaction_snapshot__ = None; - let mut metadata_schema__ = None; - let mut ssh_config__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Host => { - if host__.is_some() { - return Err(serde::de::Error::duplicate_field("host")); - } - host__ = Some(map.next_value()?); - } - GeneratedField::Port => { - if port__.is_some() { - return Err(serde::de::Error::duplicate_field("port")); - } - port__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::User => { - if user__.is_some() { - return Err(serde::de::Error::duplicate_field("user")); - } - user__ = Some(map.next_value()?); - } - GeneratedField::Password => { - if password__.is_some() { - return Err(serde::de::Error::duplicate_field("password")); - } - password__ = Some(map.next_value()?); - } - GeneratedField::Database => { - if database__.is_some() { - return Err(serde::de::Error::duplicate_field("database")); - } - database__ = Some(map.next_value()?); - } - GeneratedField::TransactionSnapshot => { - if transaction_snapshot__.is_some() { - return Err(serde::de::Error::duplicate_field("transactionSnapshot")); - } - transaction_snapshot__ = Some(map.next_value()?); - } - GeneratedField::MetadataSchema => { - if metadata_schema__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataSchema")); - } - metadata_schema__ = map.next_value()?; - } - GeneratedField::SshConfig => { - if ssh_config__.is_some() { - return Err(serde::de::Error::duplicate_field("sshConfig")); - } - ssh_config__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PostgresConfig { - host: host__.unwrap_or_default(), - port: port__.unwrap_or_default(), - user: user__.unwrap_or_default(), - password: password__.unwrap_or_default(), - database: database__.unwrap_or_default(), - transaction_snapshot: transaction_snapshot__.unwrap_or_default(), - metadata_schema: metadata_schema__, - ssh_config: ssh_config__, - }) - } - } - deserializer.deserialize_struct("peerdb_peers.PostgresConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for S3Config { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.url.is_empty() { - len += 1; - } - if self.access_key_id.is_some() { - len += 1; - } - if self.secret_access_key.is_some() { - len += 1; - } - if self.role_arn.is_some() { - len += 1; - } - if self.region.is_some() { - len += 1; - } - if self.endpoint.is_some() { - len += 1; - } - if self.metadata_db.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.S3Config", len)?; - if !self.url.is_empty() { - struct_ser.serialize_field("url", &self.url)?; - } - if let Some(v) = self.access_key_id.as_ref() { - struct_ser.serialize_field("accessKeyId", v)?; - } - if let Some(v) = self.secret_access_key.as_ref() { - struct_ser.serialize_field("secretAccessKey", v)?; - } - if let Some(v) = self.role_arn.as_ref() { - struct_ser.serialize_field("roleArn", v)?; - } - if let Some(v) = self.region.as_ref() { - struct_ser.serialize_field("region", v)?; - } - if let Some(v) = self.endpoint.as_ref() { - struct_ser.serialize_field("endpoint", v)?; - } - if let Some(v) = self.metadata_db.as_ref() { - struct_ser.serialize_field("metadataDb", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for S3Config { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "url", - "access_key_id", - "accessKeyId", 
- "secret_access_key", - "secretAccessKey", - "role_arn", - "roleArn", - "region", - "endpoint", - "metadata_db", - "metadataDb", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Url, - AccessKeyId, - SecretAccessKey, - RoleArn, - Region, - Endpoint, - MetadataDb, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "url" => Ok(GeneratedField::Url), - "accessKeyId" | "access_key_id" => Ok(GeneratedField::AccessKeyId), - "secretAccessKey" | "secret_access_key" => Ok(GeneratedField::SecretAccessKey), - "roleArn" | "role_arn" => Ok(GeneratedField::RoleArn), - "region" => Ok(GeneratedField::Region), - "endpoint" => Ok(GeneratedField::Endpoint), - "metadataDb" | "metadata_db" => Ok(GeneratedField::MetadataDb), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = S3Config; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.S3Config") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut url__ = None; - let mut access_key_id__ = None; - let mut secret_access_key__ = None; - let mut role_arn__ = None; - let mut region__ = None; - let mut endpoint__ = None; - let mut metadata_db__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Url => { - if url__.is_some() { - return Err(serde::de::Error::duplicate_field("url")); - } - url__ = Some(map.next_value()?); - } - GeneratedField::AccessKeyId => { - if access_key_id__.is_some() { - return Err(serde::de::Error::duplicate_field("accessKeyId")); - } - access_key_id__ = map.next_value()?; - } - GeneratedField::SecretAccessKey => { - if secret_access_key__.is_some() { - return Err(serde::de::Error::duplicate_field("secretAccessKey")); - } - secret_access_key__ = map.next_value()?; - } - GeneratedField::RoleArn => { - if role_arn__.is_some() { - return Err(serde::de::Error::duplicate_field("roleArn")); - } - role_arn__ = map.next_value()?; - } - GeneratedField::Region => { - if region__.is_some() { - return Err(serde::de::Error::duplicate_field("region")); - } - region__ = map.next_value()?; - } - GeneratedField::Endpoint => { - if endpoint__.is_some() { - return Err(serde::de::Error::duplicate_field("endpoint")); - } - endpoint__ = map.next_value()?; - } - GeneratedField::MetadataDb => { - if metadata_db__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataDb")); - } - metadata_db__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(S3Config { - url: url__.unwrap_or_default(), - access_key_id: access_key_id__, - secret_access_key: secret_access_key__, - role_arn: role_arn__, - region: region__, - endpoint: endpoint__, - metadata_db: metadata_db__, - }) - } - } - deserializer.deserialize_struct("peerdb_peers.S3Config", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SshConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.host.is_empty() { - len += 1; - } - if self.port != 0 { - len += 1; - } - if !self.user.is_empty() { - len += 1; - } - if !self.password.is_empty() { - len += 1; - } - if !self.private_key.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.SSHConfig", len)?; - if !self.host.is_empty() { - struct_ser.serialize_field("host", &self.host)?; - } - if self.port != 0 { - struct_ser.serialize_field("port", &self.port)?; - } - if !self.user.is_empty() { - struct_ser.serialize_field("user", &self.user)?; - } - if !self.password.is_empty() { - struct_ser.serialize_field("password", &self.password)?; - } - if !self.private_key.is_empty() { - struct_ser.serialize_field("privateKey", &self.private_key)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SshConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "host", - "port", - "user", - "password", - "private_key", - "privateKey", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Host, - Port, - User, - Password, - PrivateKey, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, 
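
`S3Config` illustrates the optional-field handling: only `url` is a plain string, and the `Option` fields are serialized only when populated. A short sketch under the same assumed crate path:

// A sketch of optional fields, assuming the `pt::peerdb_peers` path.
use pt::peerdb_peers::S3Config;

fn main() -> Result<(), serde_json::Error> {
    let cfg = S3Config {
        url: "s3://my-bucket/prefix".to_string(),
        region: Some("us-east-1".to_string()),
        ..Default::default()
    };

    // Only `url` and the populated Option show up in the JSON output.
    assert_eq!(
        serde_json::to_string(&cfg)?,
        r#"{"url":"s3://my-bucket/prefix","region":"us-east-1"}"#
    );
    Ok(())
}
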
- { - match value { - "host" => Ok(GeneratedField::Host), - "port" => Ok(GeneratedField::Port), - "user" => Ok(GeneratedField::User), - "password" => Ok(GeneratedField::Password), - "privateKey" | "private_key" => Ok(GeneratedField::PrivateKey), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SshConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.SSHConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut host__ = None; - let mut port__ = None; - let mut user__ = None; - let mut password__ = None; - let mut private_key__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Host => { - if host__.is_some() { - return Err(serde::de::Error::duplicate_field("host")); - } - host__ = Some(map.next_value()?); - } - GeneratedField::Port => { - if port__.is_some() { - return Err(serde::de::Error::duplicate_field("port")); - } - port__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::User => { - if user__.is_some() { - return Err(serde::de::Error::duplicate_field("user")); - } - user__ = Some(map.next_value()?); - } - GeneratedField::Password => { - if password__.is_some() { - return Err(serde::de::Error::duplicate_field("password")); - } - password__ = Some(map.next_value()?); - } - GeneratedField::PrivateKey => { - if private_key__.is_some() { - return Err(serde::de::Error::duplicate_field("privateKey")); - } - private_key__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SshConfig { - host: host__.unwrap_or_default(), - port: port__.unwrap_or_default(), - user: user__.unwrap_or_default(), - password: password__.unwrap_or_default(), - private_key: private_key__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.SSHConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SnowflakeConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.account_id.is_empty() { - len += 1; - } - if !self.username.is_empty() { - len += 1; - } - if !self.private_key.is_empty() { - len += 1; - } - if !self.database.is_empty() { - len += 1; - } - if !self.warehouse.is_empty() { - len += 1; - } - if !self.role.is_empty() { - len += 1; - } - if self.query_timeout != 0 { - len += 1; - } - if !self.s3_integration.is_empty() { - len += 1; - } - if self.password.is_some() { - len += 1; - } - if self.metadata_schema.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.SnowflakeConfig", len)?; - if !self.account_id.is_empty() { - struct_ser.serialize_field("accountId", &self.account_id)?; - } - if !self.username.is_empty() { - struct_ser.serialize_field("username", &self.username)?; - } - if !self.private_key.is_empty() { - struct_ser.serialize_field("privateKey", &self.private_key)?; - } - if !self.database.is_empty() { - struct_ser.serialize_field("database", &self.database)?; - } - if !self.warehouse.is_empty() { - struct_ser.serialize_field("warehouse", &self.warehouse)?; - } - if !self.role.is_empty() { - struct_ser.serialize_field("role", 
&self.role)?; - } - if self.query_timeout != 0 { - struct_ser.serialize_field("queryTimeout", ToString::to_string(&self.query_timeout).as_str())?; - } - if !self.s3_integration.is_empty() { - struct_ser.serialize_field("s3Integration", &self.s3_integration)?; - } - if let Some(v) = self.password.as_ref() { - struct_ser.serialize_field("password", v)?; - } - if let Some(v) = self.metadata_schema.as_ref() { - struct_ser.serialize_field("metadataSchema", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SnowflakeConfig { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "account_id", - "accountId", - "username", - "private_key", - "privateKey", - "database", - "warehouse", - "role", - "query_timeout", - "queryTimeout", - "s3_integration", - "s3Integration", - "password", - "metadata_schema", - "metadataSchema", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - AccountId, - Username, - PrivateKey, - Database, - Warehouse, - Role, - QueryTimeout, - S3Integration, - Password, - MetadataSchema, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "accountId" | "account_id" => Ok(GeneratedField::AccountId), - "username" => Ok(GeneratedField::Username), - "privateKey" | "private_key" => Ok(GeneratedField::PrivateKey), - "database" => Ok(GeneratedField::Database), - "warehouse" => Ok(GeneratedField::Warehouse), - "role" => Ok(GeneratedField::Role), - "queryTimeout" | "query_timeout" => Ok(GeneratedField::QueryTimeout), - "s3Integration" | "s3_integration" => Ok(GeneratedField::S3Integration), - "password" => Ok(GeneratedField::Password), - "metadataSchema" | "metadata_schema" => Ok(GeneratedField::MetadataSchema), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SnowflakeConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.SnowflakeConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut account_id__ = None; - let mut username__ = None; - let mut private_key__ = None; - let mut database__ = None; - let mut warehouse__ = None; - let mut role__ = None; - let mut query_timeout__ = None; - let mut s3_integration__ = None; - let mut password__ = None; - let mut metadata_schema__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::AccountId => { - if account_id__.is_some() { - return Err(serde::de::Error::duplicate_field("accountId")); - } - account_id__ = Some(map.next_value()?); - } - GeneratedField::Username => { - if username__.is_some() { - return Err(serde::de::Error::duplicate_field("username")); - } - username__ = Some(map.next_value()?); - } - GeneratedField::PrivateKey => { - if private_key__.is_some() { - return Err(serde::de::Error::duplicate_field("privateKey")); - } - private_key__ = Some(map.next_value()?); - } - GeneratedField::Database => { - if database__.is_some() { - return Err(serde::de::Error::duplicate_field("database")); - } - database__ = Some(map.next_value()?); - } - GeneratedField::Warehouse => { - if warehouse__.is_some() { - return Err(serde::de::Error::duplicate_field("warehouse")); - } - warehouse__ = Some(map.next_value()?); - } - GeneratedField::Role => { - if role__.is_some() { - return Err(serde::de::Error::duplicate_field("role")); - } - role__ = Some(map.next_value()?); - } - GeneratedField::QueryTimeout => { - if query_timeout__.is_some() { - return Err(serde::de::Error::duplicate_field("queryTimeout")); - } - query_timeout__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::S3Integration => { - if s3_integration__.is_some() { - return Err(serde::de::Error::duplicate_field("s3Integration")); - } - s3_integration__ = Some(map.next_value()?); - } - GeneratedField::Password => { - if password__.is_some() { - return Err(serde::de::Error::duplicate_field("password")); - } - password__ = map.next_value()?; - } - GeneratedField::MetadataSchema => { - if metadata_schema__.is_some() { - return Err(serde::de::Error::duplicate_field("metadataSchema")); - } - metadata_schema__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SnowflakeConfig { - account_id: account_id__.unwrap_or_default(), - username: username__.unwrap_or_default(), - private_key: private_key__.unwrap_or_default(), - database: database__.unwrap_or_default(), - warehouse: warehouse__.unwrap_or_default(), - role: role__.unwrap_or_default(), - query_timeout: query_timeout__.unwrap_or_default(), - s3_integration: s3_integration__.unwrap_or_default(), - password: password__, - metadata_schema: metadata_schema__, - }) - } - } - deserializer.deserialize_struct("peerdb_peers.SnowflakeConfig", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SqlServerConfig { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.server.is_empty() { - len += 1; - } - if self.port != 0 { - len += 1; - } - if !self.user.is_empty() { - len += 1; - } - if !self.password.is_empty() { - len += 1; - } - if !self.database.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_peers.SqlServerConfig", len)?; - if !self.server.is_empty() { - struct_ser.serialize_field("server", &self.server)?; - } - if self.port != 0 { - struct_ser.serialize_field("port", &self.port)?; - } - if !self.user.is_empty() { - struct_ser.serialize_field("user", &self.user)?; - } - if !self.password.is_empty() { - struct_ser.serialize_field("password", &self.password)?; - } - if !self.database.is_empty() { - struct_ser.serialize_field("database", &self.database)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SqlServerConfig { - #[allow(deprecated)] - 
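
`SnowflakeConfig` shows the 64-bit convention: `queryTimeout` is written out as a JSON string, as the proto3 JSON mapping requires, while `NumberDeserialize` accepts either a string or a plain number on the way back in. A sketch under the same assumptions:

// A sketch of the 64-bit handling, assuming the `pt::peerdb_peers` path.
use pt::peerdb_peers::SnowflakeConfig;

fn main() -> Result<(), serde_json::Error> {
    let cfg = SnowflakeConfig {
        account_id: "xy12345".to_string(),
        query_timeout: 30,
        ..Default::default()
    };

    // queryTimeout is emitted as a JSON string, per the proto3 JSON mapping.
    let json = serde_json::to_string(&cfg)?;
    assert!(json.contains(r#""queryTimeout":"30""#));

    // Decoding goes through NumberDeserialize, which also takes a plain number.
    let back: SnowflakeConfig =
        serde_json::from_str(r#"{"accountId":"xy12345","queryTimeout":30}"#)?;
    assert_eq!(back.query_timeout, 30);
    Ok(())
}
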
fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "server", - "port", - "user", - "password", - "database", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Server, - Port, - User, - Password, - Database, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "server" => Ok(GeneratedField::Server), - "port" => Ok(GeneratedField::Port), - "user" => Ok(GeneratedField::User), - "password" => Ok(GeneratedField::Password), - "database" => Ok(GeneratedField::Database), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SqlServerConfig; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_peers.SqlServerConfig") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut server__ = None; - let mut port__ = None; - let mut user__ = None; - let mut password__ = None; - let mut database__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Server => { - if server__.is_some() { - return Err(serde::de::Error::duplicate_field("server")); - } - server__ = Some(map.next_value()?); - } - GeneratedField::Port => { - if port__.is_some() { - return Err(serde::de::Error::duplicate_field("port")); - } - port__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::User => { - if user__.is_some() { - return Err(serde::de::Error::duplicate_field("user")); - } - user__ = Some(map.next_value()?); - } - GeneratedField::Password => { - if password__.is_some() { - return Err(serde::de::Error::duplicate_field("password")); - } - password__ = Some(map.next_value()?); - } - GeneratedField::Database => { - if database__.is_some() { - return Err(serde::de::Error::duplicate_field("database")); - } - database__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SqlServerConfig { - server: server__.unwrap_or_default(), - port: port__.unwrap_or_default(), - user: user__.unwrap_or_default(), - password: password__.unwrap_or_default(), - database: database__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_peers.SqlServerConfig", FIELDS, GeneratedVisitor) - } -} diff --git a/nexus/pt/src/peerdb_route.rs b/nexus/pt/src/peerdb_route.rs deleted file mode 100644 index 238ecb1a40..0000000000 --- a/nexus/pt/src/peerdb_route.rs +++ /dev/null @@ -1,385 +0,0 @@ -// @generated -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateCdcFlowRequest { - #[prost(message, optional, tag="1")] - pub connection_configs: ::core::option::Option, - #[prost(bool, tag="2")] - pub create_catalog_entry: bool, -} 
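
Every module deleted in this change (`peerdb_peers.rs`, `peerdb_peers.serde.rs`, and now `peerdb_route.rs`) carries a `// @generated` marker, i.e. it is prost/pbjson output rather than hand-written code. One common way to produce equivalent modules at build time instead of committing them is a `build.rs` along these lines; the proto paths, package name, and the choice of prost-build plus pbjson-build are illustrative assumptions, not necessarily how this repository generates its code:

// build.rs: a minimal sketch, assuming a `protos/peers.proto` layout and the
// prost-build and pbjson-build crates as build-dependencies.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR")?);
    let descriptor_path = out_dir.join("descriptors.bin");

    // Generate the prost messages/enums (the peerdb_peers.rs equivalent)
    // and capture a descriptor set for pbjson.
    let mut config = prost_build::Config::new();
    config.file_descriptor_set_path(&descriptor_path);
    config.compile_protos(&["protos/peers.proto"], &["protos"])?;

    // Generate the serde impls (the peerdb_peers.serde.rs equivalent).
    let descriptors = std::fs::read(&descriptor_path)?;
    pbjson_build::Builder::new()
        .register_descriptors(&descriptors)?
        .build(&[".peerdb_peers"])?;

    Ok(())
}
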
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateCdcFlowResponse { - #[prost(string, tag="1")] - pub worflow_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateQRepFlowRequest { - #[prost(message, optional, tag="1")] - pub qrep_config: ::core::option::Option, - #[prost(bool, tag="2")] - pub create_catalog_entry: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateQRepFlowResponse { - #[prost(string, tag="1")] - pub worflow_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ShutdownRequest { - #[prost(string, tag="1")] - pub workflow_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="3")] - pub source_peer: ::core::option::Option, - #[prost(message, optional, tag="4")] - pub destination_peer: ::core::option::Option, - #[prost(bool, tag="5")] - pub remove_flow_entry: bool, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ShutdownResponse { - #[prost(bool, tag="1")] - pub ok: bool, - #[prost(string, tag="2")] - pub error_message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ValidatePeerRequest { - #[prost(message, optional, tag="1")] - pub peer: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreatePeerRequest { - #[prost(message, optional, tag="1")] - pub peer: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DropPeerRequest { - #[prost(string, tag="1")] - pub peer_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DropPeerResponse { - #[prost(bool, tag="1")] - pub ok: bool, - #[prost(string, tag="2")] - pub error_message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ValidatePeerResponse { - #[prost(enumeration="ValidatePeerStatus", tag="1")] - pub status: i32, - #[prost(string, tag="2")] - pub message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreatePeerResponse { - #[prost(enumeration="CreatePeerStatus", tag="1")] - pub status: i32, - #[prost(string, tag="2")] - pub message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MirrorStatusRequest { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PartitionStatus { - #[prost(string, tag="1")] - pub partition_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] - pub start_time: ::core::option::Option<::pbjson_types::Timestamp>, - #[prost(message, optional, tag="3")] - pub end_time: ::core::option::Option<::pbjson_types::Timestamp>, - #[prost(int32, tag="4")] - 
pub num_rows: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QRepMirrorStatus { - #[prost(message, optional, tag="1")] - pub config: ::core::option::Option, - /// TODO make note to see if we are still in initial copy - /// or if we are in the continuous streaming mode. - #[prost(message, repeated, tag="2")] - pub partitions: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CdcSyncStatus { - #[prost(int64, tag="1")] - pub start_lsn: i64, - #[prost(int64, tag="2")] - pub end_lsn: i64, - #[prost(int32, tag="3")] - pub num_rows: i32, - #[prost(message, optional, tag="4")] - pub start_time: ::core::option::Option<::pbjson_types::Timestamp>, - #[prost(message, optional, tag="5")] - pub end_time: ::core::option::Option<::pbjson_types::Timestamp>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerSchemasResponse { - #[prost(string, repeated, tag="1")] - pub schemas: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SchemaTablesRequest { - #[prost(string, tag="1")] - pub peer_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub schema_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SchemaTablesResponse { - #[prost(string, repeated, tag="1")] - pub tables: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AllTablesResponse { - #[prost(string, repeated, tag="1")] - pub tables: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableColumnsRequest { - #[prost(string, tag="1")] - pub peer_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub schema_name: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub table_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TableColumnsResponse { - #[prost(string, repeated, tag="1")] - pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PostgresPeerActivityInfoRequest { - #[prost(string, tag="1")] - pub peer_name: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SlotInfo { - #[prost(string, tag="1")] - pub slot_name: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub redo_l_sn: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub restart_l_sn: ::prost::alloc::string::String, - #[prost(bool, tag="4")] - pub active: bool, - #[prost(float, tag="5")] - pub lag_in_mb: f32, - #[prost(string, tag="6")] - pub confirmed_flush_l_sn: ::prost::alloc::string::String, - #[prost(string, tag="7")] - pub wal_status: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StatInfo { - #[prost(int64, tag="1")] - pub pid: i64, - #[prost(string, 
tag="2")] - pub wait_event: ::prost::alloc::string::String, - #[prost(string, tag="3")] - pub wait_event_type: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub query_start: ::prost::alloc::string::String, - #[prost(string, tag="5")] - pub query: ::prost::alloc::string::String, - #[prost(float, tag="6")] - pub duration: f32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerSlotResponse { - #[prost(message, repeated, tag="1")] - pub slot_data: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerStatResponse { - #[prost(message, repeated, tag="1")] - pub stat_data: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SnapshotStatus { - #[prost(message, repeated, tag="1")] - pub clones: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CdcMirrorStatus { - #[prost(message, optional, tag="1")] - pub config: ::core::option::Option, - #[prost(message, optional, tag="2")] - pub snapshot_status: ::core::option::Option, - #[prost(message, repeated, tag="3")] - pub cdc_syncs: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MirrorStatusResponse { - #[prost(string, tag="1")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(string, tag="4")] - pub error_message: ::prost::alloc::string::String, - #[prost(oneof="mirror_status_response::Status", tags="2, 3")] - pub status: ::core::option::Option, -} -/// Nested message and enum types in `MirrorStatusResponse`. -pub mod mirror_status_response { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Status { - #[prost(message, tag="2")] - QrepStatus(super::QRepMirrorStatus), - #[prost(message, tag="3")] - CdcStatus(super::CdcMirrorStatus), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FlowStateChangeRequest { - #[prost(string, tag="1")] - pub workflow_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] - pub flow_job_name: ::prost::alloc::string::String, - #[prost(enumeration="FlowState", tag="3")] - pub requested_flow_state: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FlowStateChangeResponse { - #[prost(bool, tag="1")] - pub ok: bool, - #[prost(string, tag="2")] - pub error_message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerDbVersionRequest { -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerDbVersionResponse { - #[prost(string, tag="1")] - pub version: ::prost::alloc::string::String, -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ValidatePeerStatus { - CreationUnknown = 0, - Valid = 1, - Invalid = 2, -} -impl ValidatePeerStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            ValidatePeerStatus::CreationUnknown => "CREATION_UNKNOWN",
-            ValidatePeerStatus::Valid => "VALID",
-            ValidatePeerStatus::Invalid => "INVALID",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "CREATION_UNKNOWN" => Some(Self::CreationUnknown),
-            "VALID" => Some(Self::Valid),
-            "INVALID" => Some(Self::Invalid),
-            _ => None,
-        }
-    }
-}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum CreatePeerStatus {
-    ValidationUnknown = 0,
-    Created = 1,
-    Failed = 2,
-}
-impl CreatePeerStatus {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            CreatePeerStatus::ValidationUnknown => "VALIDATION_UNKNOWN",
-            CreatePeerStatus::Created => "CREATED",
-            CreatePeerStatus::Failed => "FAILED",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "VALIDATION_UNKNOWN" => Some(Self::ValidationUnknown),
-            "CREATED" => Some(Self::Created),
-            "FAILED" => Some(Self::Failed),
-            _ => None,
-        }
-    }
-}
-/// in the future, consider moving DropFlow to this and reduce route surface
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum FlowState {
-    StateUnknown = 0,
-    StateRunning = 1,
-    StatePaused = 2,
-}
-impl FlowState {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            FlowState::StateUnknown => "STATE_UNKNOWN",
-            FlowState::StateRunning => "STATE_RUNNING",
-            FlowState::StatePaused => "STATE_PAUSED",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "STATE_UNKNOWN" => Some(Self::StateUnknown),
-            "STATE_RUNNING" => Some(Self::StateRunning),
-            "STATE_PAUSED" => Some(Self::StatePaused),
-            _ => None,
-        }
-    }
-}
-include!("peerdb_route.tonic.rs");
-include!("peerdb_route.serde.rs");
-// @@protoc_insertion_point(module)
\ No newline at end of file
diff --git a/nexus/pt/src/peerdb_route.serde.rs b/nexus/pt/src/peerdb_route.serde.rs
deleted file mode 100644
index 5f2c96d4f2..0000000000
--- a/nexus/pt/src/peerdb_route.serde.rs
+++ /dev/null
@@ -1,4188 +0,0 @@
-// @generated
-impl serde::Serialize for AllTablesResponse {
-    #[allow(deprecated)]
-    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-        let mut len = 0;
-        if !self.tables.is_empty() {
-            len += 1;
-        }
-        let mut struct_ser = serializer.serialize_struct("peerdb_route.AllTablesResponse", len)?;
-        if !self.tables.is_empty() {
-            struct_ser.serialize_field("tables", &self.tables)?;
-        }
-        struct_ser.end()
-    }
-}
-impl<'de> serde::Deserialize<'de> for AllTablesResponse {
-    #[allow(deprecated)]
-    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        const FIELDS: &[&str] = &[
-            "tables",
-        ];
-
-        #[allow(clippy::enum_variant_names)]
-        enum GeneratedField {
-            Tables,
-            __SkipField__,
-        }
-        impl<'de> serde::Deserialize<'de> for GeneratedField {
-            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
-            where
-                D: serde::Deserializer<'de>,
-            {
-                struct GeneratedVisitor;
-
-                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-                    type Value = GeneratedField;
-
-                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                        write!(formatter, "expected one of: {:?}", &FIELDS)
-                    }
-
-                    #[allow(unused_variables)]
-                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
-                    where
-                        E: serde::de::Error,
-                    {
-                        match value {
-                            "tables" => Ok(GeneratedField::Tables),
-                            _ => Ok(GeneratedField::__SkipField__),
-                        }
-                    }
-                }
-                deserializer.deserialize_identifier(GeneratedVisitor)
-            }
-        }
-        struct GeneratedVisitor;
-        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
-            type Value = AllTablesResponse;
-
-            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                formatter.write_str("struct peerdb_route.AllTablesResponse")
-            }
-
-            fn visit_map<V>(self, mut map: V) -> std::result::Result<AllTablesResponse, V::Error>
-            where
-                V: serde::de::MapAccess<'de>,
-            {
-                let mut tables__ = None;
-                while let Some(k) = map.next_key()?
{ - match k { - GeneratedField::Tables => { - if tables__.is_some() { - return Err(serde::de::Error::duplicate_field("tables")); - } - tables__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(AllTablesResponse { - tables: tables__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.AllTablesResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CdcMirrorStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.config.is_some() { - len += 1; - } - if self.snapshot_status.is_some() { - len += 1; - } - if !self.cdc_syncs.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CDCMirrorStatus", len)?; - if let Some(v) = self.config.as_ref() { - struct_ser.serialize_field("config", v)?; - } - if let Some(v) = self.snapshot_status.as_ref() { - struct_ser.serialize_field("snapshotStatus", v)?; - } - if !self.cdc_syncs.is_empty() { - struct_ser.serialize_field("cdcSyncs", &self.cdc_syncs)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CdcMirrorStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "config", - "snapshot_status", - "snapshotStatus", - "cdc_syncs", - "cdcSyncs", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Config, - SnapshotStatus, - CdcSyncs, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "config" => Ok(GeneratedField::Config), - "snapshotStatus" | "snapshot_status" => Ok(GeneratedField::SnapshotStatus), - "cdcSyncs" | "cdc_syncs" => Ok(GeneratedField::CdcSyncs), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CdcMirrorStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CDCMirrorStatus") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut config__ = None; - let mut snapshot_status__ = None; - let mut cdc_syncs__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Config => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("config")); - } - config__ = map.next_value()?; - } - GeneratedField::SnapshotStatus => { - if snapshot_status__.is_some() { - return Err(serde::de::Error::duplicate_field("snapshotStatus")); - } - snapshot_status__ = map.next_value()?; - } - GeneratedField::CdcSyncs => { - if cdc_syncs__.is_some() { - return Err(serde::de::Error::duplicate_field("cdcSyncs")); - } - cdc_syncs__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CdcMirrorStatus { - config: config__, - snapshot_status: snapshot_status__, - cdc_syncs: cdc_syncs__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CDCMirrorStatus", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CdcSyncStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.start_lsn != 0 { - len += 1; - } - if self.end_lsn != 0 { - len += 1; - } - if self.num_rows != 0 { - len += 1; - } - if self.start_time.is_some() { - len += 1; - } - if self.end_time.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CDCSyncStatus", len)?; - if self.start_lsn != 0 { - struct_ser.serialize_field("startLsn", ToString::to_string(&self.start_lsn).as_str())?; - } - if self.end_lsn != 0 { - struct_ser.serialize_field("endLsn", ToString::to_string(&self.end_lsn).as_str())?; - } - if self.num_rows != 0 { - struct_ser.serialize_field("numRows", &self.num_rows)?; - } - if let Some(v) = self.start_time.as_ref() { - struct_ser.serialize_field("startTime", v)?; - } - if let Some(v) = self.end_time.as_ref() { - struct_ser.serialize_field("endTime", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CdcSyncStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "start_lsn", - "startLsn", - "end_lsn", - "endLsn", - "num_rows", - "numRows", - "start_time", - "startTime", - "end_time", - "endTime", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - StartLsn, - EndLsn, - NumRows, - StartTime, - EndTime, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "startLsn" | "start_lsn" => Ok(GeneratedField::StartLsn), - "endLsn" | "end_lsn" => Ok(GeneratedField::EndLsn), - "numRows" | "num_rows" => Ok(GeneratedField::NumRows), - "startTime" | "start_time" => Ok(GeneratedField::StartTime), - "endTime" | "end_time" => Ok(GeneratedField::EndTime), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CdcSyncStatus; - - fn expecting(&self, formatter: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CDCSyncStatus") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut start_lsn__ = None; - let mut end_lsn__ = None; - let mut num_rows__ = None; - let mut start_time__ = None; - let mut end_time__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::StartLsn => { - if start_lsn__.is_some() { - return Err(serde::de::Error::duplicate_field("startLsn")); - } - start_lsn__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::EndLsn => { - if end_lsn__.is_some() { - return Err(serde::de::Error::duplicate_field("endLsn")); - } - end_lsn__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::NumRows => { - if num_rows__.is_some() { - return Err(serde::de::Error::duplicate_field("numRows")); - } - num_rows__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::StartTime => { - if start_time__.is_some() { - return Err(serde::de::Error::duplicate_field("startTime")); - } - start_time__ = map.next_value()?; - } - GeneratedField::EndTime => { - if end_time__.is_some() { - return Err(serde::de::Error::duplicate_field("endTime")); - } - end_time__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CdcSyncStatus { - start_lsn: start_lsn__.unwrap_or_default(), - end_lsn: end_lsn__.unwrap_or_default(), - num_rows: num_rows__.unwrap_or_default(), - start_time: start_time__, - end_time: end_time__, - }) - } - } - deserializer.deserialize_struct("peerdb_route.CDCSyncStatus", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateCdcFlowRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.connection_configs.is_some() { - len += 1; - } - if self.create_catalog_entry { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreateCDCFlowRequest", len)?; - if let Some(v) = self.connection_configs.as_ref() { - struct_ser.serialize_field("connectionConfigs", v)?; - } - if self.create_catalog_entry { - struct_ser.serialize_field("createCatalogEntry", &self.create_catalog_entry)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateCdcFlowRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "connection_configs", - "connectionConfigs", - "create_catalog_entry", - "createCatalogEntry", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - ConnectionConfigs, - CreateCatalogEntry, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "connectionConfigs" | "connection_configs" => 
Ok(GeneratedField::ConnectionConfigs), - "createCatalogEntry" | "create_catalog_entry" => Ok(GeneratedField::CreateCatalogEntry), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateCdcFlowRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreateCDCFlowRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut connection_configs__ = None; - let mut create_catalog_entry__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::ConnectionConfigs => { - if connection_configs__.is_some() { - return Err(serde::de::Error::duplicate_field("connectionConfigs")); - } - connection_configs__ = map.next_value()?; - } - GeneratedField::CreateCatalogEntry => { - if create_catalog_entry__.is_some() { - return Err(serde::de::Error::duplicate_field("createCatalogEntry")); - } - create_catalog_entry__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateCdcFlowRequest { - connection_configs: connection_configs__, - create_catalog_entry: create_catalog_entry__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreateCDCFlowRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateCdcFlowResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.worflow_id.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreateCDCFlowResponse", len)?; - if !self.worflow_id.is_empty() { - struct_ser.serialize_field("worflowId", &self.worflow_id)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateCdcFlowResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "worflow_id", - "worflowId", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - WorflowId, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "worflowId" | "worflow_id" => Ok(GeneratedField::WorflowId), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateCdcFlowResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreateCDCFlowResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut worflow_id__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::WorflowId => { - if worflow_id__.is_some() { - return Err(serde::de::Error::duplicate_field("worflowId")); - } - worflow_id__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateCdcFlowResponse { - worflow_id: worflow_id__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreateCDCFlowResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreatePeerRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreatePeerRequest", len)?; - if let Some(v) = self.peer.as_ref() { - struct_ser.serialize_field("peer", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreatePeerRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Peer, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peer" => Ok(GeneratedField::Peer), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreatePeerRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreatePeerRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Peer => { - if peer__.is_some() { - return Err(serde::de::Error::duplicate_field("peer")); - } - peer__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreatePeerRequest { - peer: peer__, - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreatePeerRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreatePeerResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.status != 0 { - len += 1; - } - if !self.message.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreatePeerResponse", len)?; - if self.status != 0 { - let v = CreatePeerStatus::from_i32(self.status) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.status)))?; - struct_ser.serialize_field("status", &v)?; - } - if !self.message.is_empty() { - struct_ser.serialize_field("message", &self.message)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreatePeerResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "status", - "message", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Status, - Message, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "status" => Ok(GeneratedField::Status), - "message" => Ok(GeneratedField::Message), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreatePeerResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreatePeerResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut status__ = None; - let mut message__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Status => { - if status__.is_some() { - return Err(serde::de::Error::duplicate_field("status")); - } - status__ = Some(map.next_value::()? 
as i32); - } - GeneratedField::Message => { - if message__.is_some() { - return Err(serde::de::Error::duplicate_field("message")); - } - message__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreatePeerResponse { - status: status__.unwrap_or_default(), - message: message__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreatePeerResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreatePeerStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::ValidationUnknown => "VALIDATION_UNKNOWN", - Self::Created => "CREATED", - Self::Failed => "FAILED", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for CreatePeerStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "VALIDATION_UNKNOWN", - "CREATED", - "FAILED", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreatePeerStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(CreatePeerStatus::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(CreatePeerStatus::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "VALIDATION_UNKNOWN" => Ok(CreatePeerStatus::ValidationUnknown), - "CREATED" => Ok(CreatePeerStatus::Created), - "FAILED" => Ok(CreatePeerStatus::Failed), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} -impl serde::Serialize for CreateQRepFlowRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.qrep_config.is_some() { - len += 1; - } - if self.create_catalog_entry { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreateQRepFlowRequest", len)?; - if let Some(v) = self.qrep_config.as_ref() { - struct_ser.serialize_field("qrepConfig", v)?; - } - if self.create_catalog_entry { - struct_ser.serialize_field("createCatalogEntry", &self.create_catalog_entry)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateQRepFlowRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "qrep_config", - "qrepConfig", - "create_catalog_entry", - "createCatalogEntry", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - QrepConfig, - CreateCatalogEntry, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: 
serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "qrepConfig" | "qrep_config" => Ok(GeneratedField::QrepConfig), - "createCatalogEntry" | "create_catalog_entry" => Ok(GeneratedField::CreateCatalogEntry), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateQRepFlowRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreateQRepFlowRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut qrep_config__ = None; - let mut create_catalog_entry__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::QrepConfig => { - if qrep_config__.is_some() { - return Err(serde::de::Error::duplicate_field("qrepConfig")); - } - qrep_config__ = map.next_value()?; - } - GeneratedField::CreateCatalogEntry => { - if create_catalog_entry__.is_some() { - return Err(serde::de::Error::duplicate_field("createCatalogEntry")); - } - create_catalog_entry__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateQRepFlowRequest { - qrep_config: qrep_config__, - create_catalog_entry: create_catalog_entry__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreateQRepFlowRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for CreateQRepFlowResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.worflow_id.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.CreateQRepFlowResponse", len)?; - if !self.worflow_id.is_empty() { - struct_ser.serialize_field("worflowId", &self.worflow_id)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for CreateQRepFlowResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "worflow_id", - "worflowId", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - WorflowId, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "worflowId" | "worflow_id" => Ok(GeneratedField::WorflowId), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - 
impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = CreateQRepFlowResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.CreateQRepFlowResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut worflow_id__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::WorflowId => { - if worflow_id__.is_some() { - return Err(serde::de::Error::duplicate_field("worflowId")); - } - worflow_id__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(CreateQRepFlowResponse { - worflow_id: worflow_id__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.CreateQRepFlowResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for DropPeerRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.peer_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.DropPeerRequest", len)?; - if !self.peer_name.is_empty() { - struct_ser.serialize_field("peerName", &self.peer_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for DropPeerRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_name", - "peerName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerName" | "peer_name" => Ok(GeneratedField::PeerName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = DropPeerRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.DropPeerRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerName => { - if peer_name__.is_some() { - return Err(serde::de::Error::duplicate_field("peerName")); - } - peer_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(DropPeerRequest { - peer_name: peer_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.DropPeerRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for DropPeerResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.ok { - len += 1; - } - if !self.error_message.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.DropPeerResponse", len)?; - if self.ok { - struct_ser.serialize_field("ok", &self.ok)?; - } - if !self.error_message.is_empty() { - struct_ser.serialize_field("errorMessage", &self.error_message)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for DropPeerResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "ok", - "error_message", - "errorMessage", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Ok, - ErrorMessage, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "ok" => Ok(GeneratedField::Ok), - "errorMessage" | "error_message" => Ok(GeneratedField::ErrorMessage), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = DropPeerResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.DropPeerResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut ok__ = None; - let mut error_message__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Ok => { - if ok__.is_some() { - return Err(serde::de::Error::duplicate_field("ok")); - } - ok__ = Some(map.next_value()?); - } - GeneratedField::ErrorMessage => { - if error_message__.is_some() { - return Err(serde::de::Error::duplicate_field("errorMessage")); - } - error_message__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(DropPeerResponse { - ok: ok__.unwrap_or_default(), - error_message: error_message__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.DropPeerResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for FlowState { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::StateUnknown => "STATE_UNKNOWN", - Self::StateRunning => "STATE_RUNNING", - Self::StatePaused => "STATE_PAUSED", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for FlowState { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "STATE_UNKNOWN", - "STATE_RUNNING", - "STATE_PAUSED", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = FlowState; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(FlowState::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(FlowState::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "STATE_UNKNOWN" => Ok(FlowState::StateUnknown), - "STATE_RUNNING" => Ok(FlowState::StateRunning), - "STATE_PAUSED" => Ok(FlowState::StatePaused), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} -impl serde::Serialize for FlowStateChangeRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.workflow_id.is_empty() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.requested_flow_state != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.FlowStateChangeRequest", len)?; - if !self.workflow_id.is_empty() { - struct_ser.serialize_field("workflowId", &self.workflow_id)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if self.requested_flow_state != 0 { - let v = FlowState::from_i32(self.requested_flow_state) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.requested_flow_state)))?; - struct_ser.serialize_field("requestedFlowState", &v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for FlowStateChangeRequest { - #[allow(deprecated)] - fn 
deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "workflow_id", - "workflowId", - "flow_job_name", - "flowJobName", - "requested_flow_state", - "requestedFlowState", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - WorkflowId, - FlowJobName, - RequestedFlowState, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "workflowId" | "workflow_id" => Ok(GeneratedField::WorkflowId), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "requestedFlowState" | "requested_flow_state" => Ok(GeneratedField::RequestedFlowState), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = FlowStateChangeRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.FlowStateChangeRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut workflow_id__ = None; - let mut flow_job_name__ = None; - let mut requested_flow_state__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::WorkflowId => { - if workflow_id__.is_some() { - return Err(serde::de::Error::duplicate_field("workflowId")); - } - workflow_id__ = Some(map.next_value()?); - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::RequestedFlowState => { - if requested_flow_state__.is_some() { - return Err(serde::de::Error::duplicate_field("requestedFlowState")); - } - requested_flow_state__ = Some(map.next_value::()? 
as i32); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(FlowStateChangeRequest { - workflow_id: workflow_id__.unwrap_or_default(), - flow_job_name: flow_job_name__.unwrap_or_default(), - requested_flow_state: requested_flow_state__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.FlowStateChangeRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for FlowStateChangeResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.ok { - len += 1; - } - if !self.error_message.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.FlowStateChangeResponse", len)?; - if self.ok { - struct_ser.serialize_field("ok", &self.ok)?; - } - if !self.error_message.is_empty() { - struct_ser.serialize_field("errorMessage", &self.error_message)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for FlowStateChangeResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "ok", - "error_message", - "errorMessage", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Ok, - ErrorMessage, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "ok" => Ok(GeneratedField::Ok), - "errorMessage" | "error_message" => Ok(GeneratedField::ErrorMessage), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = FlowStateChangeResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.FlowStateChangeResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut ok__ = None; - let mut error_message__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Ok => { - if ok__.is_some() { - return Err(serde::de::Error::duplicate_field("ok")); - } - ok__ = Some(map.next_value()?); - } - GeneratedField::ErrorMessage => { - if error_message__.is_some() { - return Err(serde::de::Error::duplicate_field("errorMessage")); - } - error_message__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(FlowStateChangeResponse { - ok: ok__.unwrap_or_default(), - error_message: error_message__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.FlowStateChangeResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for MirrorStatusRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.MirrorStatusRequest", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for MirrorStatusRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = MirrorStatusRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.MirrorStatusRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(MirrorStatusRequest { - flow_job_name: flow_job_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.MirrorStatusRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for MirrorStatusResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.flow_job_name.is_empty() { - len += 1; - } - if !self.error_message.is_empty() { - len += 1; - } - if self.status.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.MirrorStatusResponse", len)?; - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if !self.error_message.is_empty() { - struct_ser.serialize_field("errorMessage", &self.error_message)?; - } - if let Some(v) = self.status.as_ref() { - match v { - mirror_status_response::Status::QrepStatus(v) => { - struct_ser.serialize_field("qrepStatus", v)?; - } - mirror_status_response::Status::CdcStatus(v) => { - struct_ser.serialize_field("cdcStatus", v)?; - } - } - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for MirrorStatusResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "flow_job_name", - "flowJobName", - "error_message", - "errorMessage", - "qrep_status", - "qrepStatus", - "cdc_status", - "cdcStatus", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - FlowJobName, - ErrorMessage, - QrepStatus, - CdcStatus, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "errorMessage" | "error_message" => Ok(GeneratedField::ErrorMessage), - "qrepStatus" | "qrep_status" => Ok(GeneratedField::QrepStatus), - "cdcStatus" | "cdc_status" => Ok(GeneratedField::CdcStatus), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = MirrorStatusResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.MirrorStatusResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut flow_job_name__ = None; - let mut error_message__ = None; - let mut status__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::ErrorMessage => { - if error_message__.is_some() { - return Err(serde::de::Error::duplicate_field("errorMessage")); - } - error_message__ = Some(map.next_value()?); - } - GeneratedField::QrepStatus => { - if status__.is_some() { - return Err(serde::de::Error::duplicate_field("qrepStatus")); - } - status__ = map.next_value::<::std::option::Option<_>>()?.map(mirror_status_response::Status::QrepStatus) -; - } - GeneratedField::CdcStatus => { - if status__.is_some() { - return Err(serde::de::Error::duplicate_field("cdcStatus")); - } - status__ = map.next_value::<::std::option::Option<_>>()?.map(mirror_status_response::Status::CdcStatus) -; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(MirrorStatusResponse { - flow_job_name: flow_job_name__.unwrap_or_default(), - error_message: error_message__.unwrap_or_default(), - status: status__, - }) - } - } - deserializer.deserialize_struct("peerdb_route.MirrorStatusResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PartitionStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.partition_id.is_empty() { - len += 1; - } - if self.start_time.is_some() { - len += 1; - } - if self.end_time.is_some() { - len += 1; - } - if self.num_rows != 0 { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PartitionStatus", len)?; - if !self.partition_id.is_empty() { - struct_ser.serialize_field("partitionId", &self.partition_id)?; - } - if let Some(v) = self.start_time.as_ref() { - struct_ser.serialize_field("startTime", v)?; - } - if let Some(v) = self.end_time.as_ref() { - struct_ser.serialize_field("endTime", v)?; - } - if self.num_rows != 0 { - struct_ser.serialize_field("numRows", &self.num_rows)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PartitionStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "partition_id", - "partitionId", - "start_time", - "startTime", - "end_time", - "endTime", - "num_rows", - "numRows", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PartitionId, - StartTime, - EndTime, - NumRows, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "partitionId" | "partition_id" => Ok(GeneratedField::PartitionId), - "startTime" | "start_time" => Ok(GeneratedField::StartTime), - "endTime" | "end_time" => Ok(GeneratedField::EndTime), - "numRows" | "num_rows" => Ok(GeneratedField::NumRows), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - 
impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PartitionStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PartitionStatus") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut partition_id__ = None; - let mut start_time__ = None; - let mut end_time__ = None; - let mut num_rows__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PartitionId => { - if partition_id__.is_some() { - return Err(serde::de::Error::duplicate_field("partitionId")); - } - partition_id__ = Some(map.next_value()?); - } - GeneratedField::StartTime => { - if start_time__.is_some() { - return Err(serde::de::Error::duplicate_field("startTime")); - } - start_time__ = map.next_value()?; - } - GeneratedField::EndTime => { - if end_time__.is_some() { - return Err(serde::de::Error::duplicate_field("endTime")); - } - end_time__ = map.next_value()?; - } - GeneratedField::NumRows => { - if num_rows__.is_some() { - return Err(serde::de::Error::duplicate_field("numRows")); - } - num_rows__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PartitionStatus { - partition_id: partition_id__.unwrap_or_default(), - start_time: start_time__, - end_time: end_time__, - num_rows: num_rows__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PartitionStatus", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerDbVersionRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let len = 0; - let struct_ser = serializer.serialize_struct("peerdb_route.PeerDBVersionRequest", len)?; - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerDbVersionRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - Ok(GeneratedField::__SkipField__) - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerDbVersionRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PeerDBVersionRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - while map.next_key::()?.is_some() { - let _ = map.next_value::()?; - } - Ok(PeerDbVersionRequest { - }) - } - } - deserializer.deserialize_struct("peerdb_route.PeerDBVersionRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerDbVersionResponse { 
- #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.version.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PeerDBVersionResponse", len)?; - if !self.version.is_empty() { - struct_ser.serialize_field("version", &self.version)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerDbVersionResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "version", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Version, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "version" => Ok(GeneratedField::Version), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerDbVersionResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PeerDBVersionResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut version__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Version => { - if version__.is_some() { - return Err(serde::de::Error::duplicate_field("version")); - } - version__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PeerDbVersionResponse { - version: version__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PeerDBVersionResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerSchemasResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.schemas.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PeerSchemasResponse", len)?; - if !self.schemas.is_empty() { - struct_ser.serialize_field("schemas", &self.schemas)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerSchemasResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "schemas", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Schemas, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "schemas" => Ok(GeneratedField::Schemas), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerSchemasResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PeerSchemasResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut schemas__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Schemas => { - if schemas__.is_some() { - return Err(serde::de::Error::duplicate_field("schemas")); - } - schemas__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PeerSchemasResponse { - schemas: schemas__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PeerSchemasResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerSlotResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.slot_data.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PeerSlotResponse", len)?; - if !self.slot_data.is_empty() { - struct_ser.serialize_field("slotData", &self.slot_data)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerSlotResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "slot_data", - "slotData", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SlotData, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "slotData" | "slot_data" => Ok(GeneratedField::SlotData), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerSlotResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PeerSlotResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut slot_data__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::SlotData => { - if slot_data__.is_some() { - return Err(serde::de::Error::duplicate_field("slotData")); - } - slot_data__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PeerSlotResponse { - slot_data: slot_data__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PeerSlotResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PeerStatResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.stat_data.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PeerStatResponse", len)?; - if !self.stat_data.is_empty() { - struct_ser.serialize_field("statData", &self.stat_data)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PeerStatResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "stat_data", - "statData", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - StatData, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "statData" | "stat_data" => Ok(GeneratedField::StatData), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PeerStatResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PeerStatResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut stat_data__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::StatData => { - if stat_data__.is_some() { - return Err(serde::de::Error::duplicate_field("statData")); - } - stat_data__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PeerStatResponse { - stat_data: stat_data__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PeerStatResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for PostgresPeerActivityInfoRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.peer_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.PostgresPeerActivityInfoRequest", len)?; - if !self.peer_name.is_empty() { - struct_ser.serialize_field("peerName", &self.peer_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for PostgresPeerActivityInfoRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_name", - "peerName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerName" | "peer_name" => Ok(GeneratedField::PeerName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = PostgresPeerActivityInfoRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.PostgresPeerActivityInfoRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerName => { - if peer_name__.is_some() { - return Err(serde::de::Error::duplicate_field("peerName")); - } - peer_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(PostgresPeerActivityInfoRequest { - peer_name: peer_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.PostgresPeerActivityInfoRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for QRepMirrorStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.config.is_some() { - len += 1; - } - if !self.partitions.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.QRepMirrorStatus", len)?; - if let Some(v) = self.config.as_ref() { - struct_ser.serialize_field("config", v)?; - } - if !self.partitions.is_empty() { - struct_ser.serialize_field("partitions", &self.partitions)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for QRepMirrorStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "config", - "partitions", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Config, - Partitions, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "config" => Ok(GeneratedField::Config), - "partitions" => Ok(GeneratedField::Partitions), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = QRepMirrorStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.QRepMirrorStatus") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut config__ = None; - let mut partitions__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Config => { - if config__.is_some() { - return Err(serde::de::Error::duplicate_field("config")); - } - config__ = map.next_value()?; - } - GeneratedField::Partitions => { - if partitions__.is_some() { - return Err(serde::de::Error::duplicate_field("partitions")); - } - partitions__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(QRepMirrorStatus { - config: config__, - partitions: partitions__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.QRepMirrorStatus", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SchemaTablesRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.peer_name.is_empty() { - len += 1; - } - if !self.schema_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.SchemaTablesRequest", len)?; - if !self.peer_name.is_empty() { - struct_ser.serialize_field("peerName", &self.peer_name)?; - } - if !self.schema_name.is_empty() { - struct_ser.serialize_field("schemaName", &self.schema_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SchemaTablesRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_name", - "peerName", - "schema_name", - "schemaName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerName, - SchemaName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerName" | "peer_name" => Ok(GeneratedField::PeerName), - "schemaName" | "schema_name" => Ok(GeneratedField::SchemaName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SchemaTablesRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.SchemaTablesRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_name__ = None; - let mut schema_name__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::PeerName => { - if peer_name__.is_some() { - return Err(serde::de::Error::duplicate_field("peerName")); - } - peer_name__ = Some(map.next_value()?); - } - GeneratedField::SchemaName => { - if schema_name__.is_some() { - return Err(serde::de::Error::duplicate_field("schemaName")); - } - schema_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SchemaTablesRequest { - peer_name: peer_name__.unwrap_or_default(), - schema_name: schema_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.SchemaTablesRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SchemaTablesResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.tables.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.SchemaTablesResponse", len)?; - if !self.tables.is_empty() { - struct_ser.serialize_field("tables", &self.tables)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SchemaTablesResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "tables", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Tables, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "tables" => Ok(GeneratedField::Tables), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SchemaTablesResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.SchemaTablesResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut tables__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Tables => { - if tables__.is_some() { - return Err(serde::de::Error::duplicate_field("tables")); - } - tables__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SchemaTablesResponse { - tables: tables__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.SchemaTablesResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ShutdownRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.workflow_id.is_empty() { - len += 1; - } - if !self.flow_job_name.is_empty() { - len += 1; - } - if self.source_peer.is_some() { - len += 1; - } - if self.destination_peer.is_some() { - len += 1; - } - if self.remove_flow_entry { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.ShutdownRequest", len)?; - if !self.workflow_id.is_empty() { - struct_ser.serialize_field("workflowId", &self.workflow_id)?; - } - if !self.flow_job_name.is_empty() { - struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; - } - if let Some(v) = self.source_peer.as_ref() { - struct_ser.serialize_field("sourcePeer", v)?; - } - if let Some(v) = self.destination_peer.as_ref() { - struct_ser.serialize_field("destinationPeer", v)?; - } - if self.remove_flow_entry { - struct_ser.serialize_field("removeFlowEntry", &self.remove_flow_entry)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ShutdownRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "workflow_id", - "workflowId", - "flow_job_name", - "flowJobName", - "source_peer", - "sourcePeer", - "destination_peer", - "destinationPeer", - "remove_flow_entry", - "removeFlowEntry", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - WorkflowId, - FlowJobName, - SourcePeer, - DestinationPeer, - RemoveFlowEntry, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "workflowId" | "workflow_id" => Ok(GeneratedField::WorkflowId), - "flowJobName" | "flow_job_name" => Ok(GeneratedField::FlowJobName), - "sourcePeer" | "source_peer" => Ok(GeneratedField::SourcePeer), - "destinationPeer" | "destination_peer" => Ok(GeneratedField::DestinationPeer), - "removeFlowEntry" | "remove_flow_entry" => Ok(GeneratedField::RemoveFlowEntry), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ShutdownRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.ShutdownRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let 
mut workflow_id__ = None; - let mut flow_job_name__ = None; - let mut source_peer__ = None; - let mut destination_peer__ = None; - let mut remove_flow_entry__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::WorkflowId => { - if workflow_id__.is_some() { - return Err(serde::de::Error::duplicate_field("workflowId")); - } - workflow_id__ = Some(map.next_value()?); - } - GeneratedField::FlowJobName => { - if flow_job_name__.is_some() { - return Err(serde::de::Error::duplicate_field("flowJobName")); - } - flow_job_name__ = Some(map.next_value()?); - } - GeneratedField::SourcePeer => { - if source_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("sourcePeer")); - } - source_peer__ = map.next_value()?; - } - GeneratedField::DestinationPeer => { - if destination_peer__.is_some() { - return Err(serde::de::Error::duplicate_field("destinationPeer")); - } - destination_peer__ = map.next_value()?; - } - GeneratedField::RemoveFlowEntry => { - if remove_flow_entry__.is_some() { - return Err(serde::de::Error::duplicate_field("removeFlowEntry")); - } - remove_flow_entry__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(ShutdownRequest { - workflow_id: workflow_id__.unwrap_or_default(), - flow_job_name: flow_job_name__.unwrap_or_default(), - source_peer: source_peer__, - destination_peer: destination_peer__, - remove_flow_entry: remove_flow_entry__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.ShutdownRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ShutdownResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.ok { - len += 1; - } - if !self.error_message.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.ShutdownResponse", len)?; - if self.ok { - struct_ser.serialize_field("ok", &self.ok)?; - } - if !self.error_message.is_empty() { - struct_ser.serialize_field("errorMessage", &self.error_message)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ShutdownResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "ok", - "error_message", - "errorMessage", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Ok, - ErrorMessage, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "ok" => Ok(GeneratedField::Ok), - "errorMessage" | "error_message" => Ok(GeneratedField::ErrorMessage), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ShutdownResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result 
{ - formatter.write_str("struct peerdb_route.ShutdownResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut ok__ = None; - let mut error_message__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Ok => { - if ok__.is_some() { - return Err(serde::de::Error::duplicate_field("ok")); - } - ok__ = Some(map.next_value()?); - } - GeneratedField::ErrorMessage => { - if error_message__.is_some() { - return Err(serde::de::Error::duplicate_field("errorMessage")); - } - error_message__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(ShutdownResponse { - ok: ok__.unwrap_or_default(), - error_message: error_message__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.ShutdownResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SlotInfo { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.slot_name.is_empty() { - len += 1; - } - if !self.redo_l_sn.is_empty() { - len += 1; - } - if !self.restart_l_sn.is_empty() { - len += 1; - } - if self.active { - len += 1; - } - if self.lag_in_mb != 0. { - len += 1; - } - if !self.confirmed_flush_l_sn.is_empty() { - len += 1; - } - if !self.wal_status.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.SlotInfo", len)?; - if !self.slot_name.is_empty() { - struct_ser.serialize_field("slotName", &self.slot_name)?; - } - if !self.redo_l_sn.is_empty() { - struct_ser.serialize_field("redoLSN", &self.redo_l_sn)?; - } - if !self.restart_l_sn.is_empty() { - struct_ser.serialize_field("restartLSN", &self.restart_l_sn)?; - } - if self.active { - struct_ser.serialize_field("active", &self.active)?; - } - if self.lag_in_mb != 0. 
{ - struct_ser.serialize_field("lagInMb", &self.lag_in_mb)?; - } - if !self.confirmed_flush_l_sn.is_empty() { - struct_ser.serialize_field("confirmedFlushLSN", &self.confirmed_flush_l_sn)?; - } - if !self.wal_status.is_empty() { - struct_ser.serialize_field("walStatus", &self.wal_status)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SlotInfo { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "slot_name", - "slotName", - "redo_lSN", - "redoLSN", - "restart_lSN", - "restartLSN", - "active", - "lag_in_mb", - "lagInMb", - "confirmed_flush_lSN", - "confirmedFlushLSN", - "wal_status", - "walStatus", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SlotName, - RedoLSn, - RestartLSn, - Active, - LagInMb, - ConfirmedFlushLSn, - WalStatus, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "slotName" | "slot_name" => Ok(GeneratedField::SlotName), - "redoLSN" | "redo_lSN" => Ok(GeneratedField::RedoLSn), - "restartLSN" | "restart_lSN" => Ok(GeneratedField::RestartLSn), - "active" => Ok(GeneratedField::Active), - "lagInMb" | "lag_in_mb" => Ok(GeneratedField::LagInMb), - "confirmedFlushLSN" | "confirmed_flush_lSN" => Ok(GeneratedField::ConfirmedFlushLSn), - "walStatus" | "wal_status" => Ok(GeneratedField::WalStatus), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SlotInfo; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.SlotInfo") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut slot_name__ = None; - let mut redo_l_sn__ = None; - let mut restart_l_sn__ = None; - let mut active__ = None; - let mut lag_in_mb__ = None; - let mut confirmed_flush_l_sn__ = None; - let mut wal_status__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::SlotName => { - if slot_name__.is_some() { - return Err(serde::de::Error::duplicate_field("slotName")); - } - slot_name__ = Some(map.next_value()?); - } - GeneratedField::RedoLSn => { - if redo_l_sn__.is_some() { - return Err(serde::de::Error::duplicate_field("redoLSN")); - } - redo_l_sn__ = Some(map.next_value()?); - } - GeneratedField::RestartLSn => { - if restart_l_sn__.is_some() { - return Err(serde::de::Error::duplicate_field("restartLSN")); - } - restart_l_sn__ = Some(map.next_value()?); - } - GeneratedField::Active => { - if active__.is_some() { - return Err(serde::de::Error::duplicate_field("active")); - } - active__ = Some(map.next_value()?); - } - GeneratedField::LagInMb => { - if lag_in_mb__.is_some() { - return Err(serde::de::Error::duplicate_field("lagInMb")); - } - lag_in_mb__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::ConfirmedFlushLSn => { - if confirmed_flush_l_sn__.is_some() { - return Err(serde::de::Error::duplicate_field("confirmedFlushLSN")); - } - confirmed_flush_l_sn__ = Some(map.next_value()?); - } - GeneratedField::WalStatus => { - if wal_status__.is_some() { - return Err(serde::de::Error::duplicate_field("walStatus")); - } - wal_status__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SlotInfo { - slot_name: slot_name__.unwrap_or_default(), - redo_l_sn: redo_l_sn__.unwrap_or_default(), - restart_l_sn: restart_l_sn__.unwrap_or_default(), - active: active__.unwrap_or_default(), - lag_in_mb: lag_in_mb__.unwrap_or_default(), - confirmed_flush_l_sn: confirmed_flush_l_sn__.unwrap_or_default(), - wal_status: wal_status__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.SlotInfo", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for SnapshotStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.clones.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.SnapshotStatus", len)?; - if !self.clones.is_empty() { - struct_ser.serialize_field("clones", &self.clones)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for SnapshotStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "clones", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Clones, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "clones" => Ok(GeneratedField::Clones), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = SnapshotStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { - formatter.write_str("struct peerdb_route.SnapshotStatus") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut clones__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Clones => { - if clones__.is_some() { - return Err(serde::de::Error::duplicate_field("clones")); - } - clones__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(SnapshotStatus { - clones: clones__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.SnapshotStatus", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for StatInfo { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.pid != 0 { - len += 1; - } - if !self.wait_event.is_empty() { - len += 1; - } - if !self.wait_event_type.is_empty() { - len += 1; - } - if !self.query_start.is_empty() { - len += 1; - } - if !self.query.is_empty() { - len += 1; - } - if self.duration != 0. { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.StatInfo", len)?; - if self.pid != 0 { - struct_ser.serialize_field("pid", ToString::to_string(&self.pid).as_str())?; - } - if !self.wait_event.is_empty() { - struct_ser.serialize_field("waitEvent", &self.wait_event)?; - } - if !self.wait_event_type.is_empty() { - struct_ser.serialize_field("waitEventType", &self.wait_event_type)?; - } - if !self.query_start.is_empty() { - struct_ser.serialize_field("queryStart", &self.query_start)?; - } - if !self.query.is_empty() { - struct_ser.serialize_field("query", &self.query)?; - } - if self.duration != 0. 
{ - struct_ser.serialize_field("duration", &self.duration)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for StatInfo { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "pid", - "wait_event", - "waitEvent", - "wait_event_type", - "waitEventType", - "query_start", - "queryStart", - "query", - "duration", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Pid, - WaitEvent, - WaitEventType, - QueryStart, - Query, - Duration, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "pid" => Ok(GeneratedField::Pid), - "waitEvent" | "wait_event" => Ok(GeneratedField::WaitEvent), - "waitEventType" | "wait_event_type" => Ok(GeneratedField::WaitEventType), - "queryStart" | "query_start" => Ok(GeneratedField::QueryStart), - "query" => Ok(GeneratedField::Query), - "duration" => Ok(GeneratedField::Duration), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = StatInfo; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.StatInfo") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut pid__ = None; - let mut wait_event__ = None; - let mut wait_event_type__ = None; - let mut query_start__ = None; - let mut query__ = None; - let mut duration__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Pid => { - if pid__.is_some() { - return Err(serde::de::Error::duplicate_field("pid")); - } - pid__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::WaitEvent => { - if wait_event__.is_some() { - return Err(serde::de::Error::duplicate_field("waitEvent")); - } - wait_event__ = Some(map.next_value()?); - } - GeneratedField::WaitEventType => { - if wait_event_type__.is_some() { - return Err(serde::de::Error::duplicate_field("waitEventType")); - } - wait_event_type__ = Some(map.next_value()?); - } - GeneratedField::QueryStart => { - if query_start__.is_some() { - return Err(serde::de::Error::duplicate_field("queryStart")); - } - query_start__ = Some(map.next_value()?); - } - GeneratedField::Query => { - if query__.is_some() { - return Err(serde::de::Error::duplicate_field("query")); - } - query__ = Some(map.next_value()?); - } - GeneratedField::Duration => { - if duration__.is_some() { - return Err(serde::de::Error::duplicate_field("duration")); - } - duration__ = - Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(StatInfo { - pid: pid__.unwrap_or_default(), - wait_event: wait_event__.unwrap_or_default(), - wait_event_type: wait_event_type__.unwrap_or_default(), - query_start: query_start__.unwrap_or_default(), - query: query__.unwrap_or_default(), - duration: duration__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.StatInfo", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableColumnsRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.peer_name.is_empty() { - len += 1; - } - if !self.schema_name.is_empty() { - len += 1; - } - if !self.table_name.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.TableColumnsRequest", len)?; - if !self.peer_name.is_empty() { - struct_ser.serialize_field("peerName", &self.peer_name)?; - } - if !self.schema_name.is_empty() { - struct_ser.serialize_field("schemaName", &self.schema_name)?; - } - if !self.table_name.is_empty() { - struct_ser.serialize_field("tableName", &self.table_name)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableColumnsRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer_name", - "peerName", - "schema_name", - "schemaName", - "table_name", - "tableName", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - PeerName, - SchemaName, - TableName, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peerName" | "peer_name" => Ok(GeneratedField::PeerName), - "schemaName" | "schema_name" => Ok(GeneratedField::SchemaName), - "tableName" | 
"table_name" => Ok(GeneratedField::TableName), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableColumnsRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.TableColumnsRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer_name__ = None; - let mut schema_name__ = None; - let mut table_name__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::PeerName => { - if peer_name__.is_some() { - return Err(serde::de::Error::duplicate_field("peerName")); - } - peer_name__ = Some(map.next_value()?); - } - GeneratedField::SchemaName => { - if schema_name__.is_some() { - return Err(serde::de::Error::duplicate_field("schemaName")); - } - schema_name__ = Some(map.next_value()?); - } - GeneratedField::TableName => { - if table_name__.is_some() { - return Err(serde::de::Error::duplicate_field("tableName")); - } - table_name__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableColumnsRequest { - peer_name: peer_name__.unwrap_or_default(), - schema_name: schema_name__.unwrap_or_default(), - table_name: table_name__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.TableColumnsRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for TableColumnsResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.columns.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.TableColumnsResponse", len)?; - if !self.columns.is_empty() { - struct_ser.serialize_field("columns", &self.columns)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for TableColumnsResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "columns", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Columns, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "columns" => Ok(GeneratedField::Columns), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = TableColumnsResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.TableColumnsResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut columns__ = None; - while let Some(k) = 
map.next_key()? { - match k { - GeneratedField::Columns => { - if columns__.is_some() { - return Err(serde::de::Error::duplicate_field("columns")); - } - columns__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(TableColumnsResponse { - columns: columns__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.TableColumnsResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ValidatePeerRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.peer.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.ValidatePeerRequest", len)?; - if let Some(v) = self.peer.as_ref() { - struct_ser.serialize_field("peer", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ValidatePeerRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "peer", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Peer, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "peer" => Ok(GeneratedField::Peer), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ValidatePeerRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.ValidatePeerRequest") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut peer__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Peer => { - if peer__.is_some() { - return Err(serde::de::Error::duplicate_field("peer")); - } - peer__ = map.next_value()?; - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(ValidatePeerRequest { - peer: peer__, - }) - } - } - deserializer.deserialize_struct("peerdb_route.ValidatePeerRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ValidatePeerResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.status != 0 { - len += 1; - } - if !self.message.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("peerdb_route.ValidatePeerResponse", len)?; - if self.status != 0 { - let v = ValidatePeerStatus::from_i32(self.status) - .ok_or_else(|| serde::ser::Error::custom(format!("Invalid variant {}", self.status)))?; - struct_ser.serialize_field("status", &v)?; - } - if !self.message.is_empty() { - struct_ser.serialize_field("message", &self.message)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ValidatePeerResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "status", - "message", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Status, - Message, - __SkipField__, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "status" => Ok(GeneratedField::Status), - "message" => Ok(GeneratedField::Message), - _ => Ok(GeneratedField::__SkipField__), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ValidatePeerResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct peerdb_route.ValidatePeerResponse") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut status__ = None; - let mut message__ = None; - while let Some(k) = map.next_key()? { - match k { - GeneratedField::Status => { - if status__.is_some() { - return Err(serde::de::Error::duplicate_field("status")); - } - status__ = Some(map.next_value::()? 
as i32); - } - GeneratedField::Message => { - if message__.is_some() { - return Err(serde::de::Error::duplicate_field("message")); - } - message__ = Some(map.next_value()?); - } - GeneratedField::__SkipField__ => { - let _ = map.next_value::()?; - } - } - } - Ok(ValidatePeerResponse { - status: status__.unwrap_or_default(), - message: message__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("peerdb_route.ValidatePeerResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ValidatePeerStatus { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let variant = match self { - Self::CreationUnknown => "CREATION_UNKNOWN", - Self::Valid => "VALID", - Self::Invalid => "INVALID", - }; - serializer.serialize_str(variant) - } -} -impl<'de> serde::Deserialize<'de> for ValidatePeerStatus { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "CREATION_UNKNOWN", - "VALID", - "INVALID", - ]; - - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ValidatePeerStatus; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - fn visit_i64(self, v: i64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(ValidatePeerStatus::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) - }) - } - - fn visit_u64(self, v: u64) -> std::result::Result - where - E: serde::de::Error, - { - use std::convert::TryFrom; - i32::try_from(v) - .ok() - .and_then(ValidatePeerStatus::from_i32) - .ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) - }) - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "CREATION_UNKNOWN" => Ok(ValidatePeerStatus::CreationUnknown), - "VALID" => Ok(ValidatePeerStatus::Valid), - "INVALID" => Ok(ValidatePeerStatus::Invalid), - _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), - } - } - } - deserializer.deserialize_any(GeneratedVisitor) - } -} diff --git a/nexus/pt/src/peerdb_route.tonic.rs b/nexus/pt/src/peerdb_route.tonic.rs deleted file mode 100644 index b6a2c9506d..0000000000 --- a/nexus/pt/src/peerdb_route.tonic.rs +++ /dev/null @@ -1,1418 +0,0 @@ -// @generated -/// Generated client implementations. -pub mod flow_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// - #[derive(Debug, Clone)] - pub struct FlowServiceClient { - inner: tonic::client::Grpc, - } - impl FlowServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl FlowServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> FlowServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - FlowServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// - pub async fn validate_peer( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/ValidatePeer", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "ValidatePeer")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn create_peer( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/CreatePeer", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "CreatePeer")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn drop_peer( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/DropPeer", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "DropPeer")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn create_cdc_flow( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/CreateCDCFlow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "CreateCDCFlow")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn create_q_rep_flow( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/CreateQRepFlow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "CreateQRepFlow")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_schemas( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetSchemas", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetSchemas")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_tables_in_schema( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetTablesInSchema", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("peerdb_route.FlowService", "GetTablesInSchema"), - ); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_all_tables( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetAllTables", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetAllTables")); - self.inner.unary(req, path, 
codec).await - } - /// - pub async fn get_columns( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetColumns", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetColumns")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_slot_info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetSlotInfo", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetSlotInfo")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_stat_info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetStatInfo", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetStatInfo")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn shutdown_flow( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/ShutdownFlow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "ShutdownFlow")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn flow_state_change( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/FlowStateChange", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "FlowStateChange")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn mirror_status( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; 
- let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/MirrorStatus", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "MirrorStatus")); - self.inner.unary(req, path, codec).await - } - /// - pub async fn get_version( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/peerdb_route.FlowService/GetVersion", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("peerdb_route.FlowService", "GetVersion")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod flow_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with FlowServiceServer. - #[async_trait] - pub trait FlowService: Send + Sync + 'static { - /// - async fn validate_peer( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn create_peer( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn drop_peer( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn create_cdc_flow( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn create_q_rep_flow( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_schemas( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_tables_in_schema( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_all_tables( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_columns( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_slot_info( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_stat_info( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn shutdown_flow( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn flow_state_change( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn mirror_status( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// - async fn get_version( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// - #[derive(Debug)] - pub struct FlowServiceServer { - inner: _Inner, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: 
EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - struct _Inner(Arc); - impl FlowServiceServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for FlowServiceServer - where - T: FlowService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); - match req.uri().path() { - "/peerdb_route.FlowService/ValidatePeer" => { - #[allow(non_camel_case_types)] - struct ValidatePeerSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for ValidatePeerSvc { - type Response = super::ValidatePeerResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).validate_peer(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = ValidatePeerSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/CreatePeer" => { - #[allow(non_camel_case_types)] - struct CreatePeerSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for CreatePeerSvc { - type Response = 
super::CreatePeerResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { (*inner).create_peer(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = CreatePeerSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/DropPeer" => { - #[allow(non_camel_case_types)] - struct DropPeerSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for DropPeerSvc { - type Response = super::DropPeerResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { (*inner).drop_peer(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = DropPeerSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/CreateCDCFlow" => { - #[allow(non_camel_case_types)] - struct CreateCDCFlowSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for CreateCDCFlowSvc { - type Response = super::CreateCdcFlowResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).create_cdc_flow(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = CreateCDCFlowSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - 
Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/CreateQRepFlow" => { - #[allow(non_camel_case_types)] - struct CreateQRepFlowSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for CreateQRepFlowSvc { - type Response = super::CreateQRepFlowResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).create_q_rep_flow(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = CreateQRepFlowSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetSchemas" => { - #[allow(non_camel_case_types)] - struct GetSchemasSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetSchemasSvc { - type Response = super::PeerSchemasResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::PostgresPeerActivityInfoRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { (*inner).get_schemas(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetSchemasSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetTablesInSchema" => { - #[allow(non_camel_case_types)] - struct GetTablesInSchemaSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetTablesInSchemaSvc { - type Response = super::SchemaTablesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).get_tables_in_schema(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = 
GetTablesInSchemaSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetAllTables" => { - #[allow(non_camel_case_types)] - struct GetAllTablesSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetAllTablesSvc { - type Response = super::AllTablesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::PostgresPeerActivityInfoRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).get_all_tables(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetAllTablesSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetColumns" => { - #[allow(non_camel_case_types)] - struct GetColumnsSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetColumnsSvc { - type Response = super::TableColumnsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { (*inner).get_columns(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetColumnsSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetSlotInfo" => { - #[allow(non_camel_case_types)] - struct GetSlotInfoSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetSlotInfoSvc { - type Response = super::PeerSlotResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::PostgresPeerActivityInfoRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).get_slot_info(request).await - }; - Box::pin(fut) - } - } - let 
accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetSlotInfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetStatInfo" => { - #[allow(non_camel_case_types)] - struct GetStatInfoSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetStatInfoSvc { - type Response = super::PeerStatResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::PostgresPeerActivityInfoRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).get_stat_info(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetStatInfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/ShutdownFlow" => { - #[allow(non_camel_case_types)] - struct ShutdownFlowSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for ShutdownFlowSvc { - type Response = super::ShutdownResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).shutdown_flow(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = ShutdownFlowSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/FlowStateChange" => { - #[allow(non_camel_case_types)] - struct FlowStateChangeSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for FlowStateChangeSvc { 
- type Response = super::FlowStateChangeResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).flow_state_change(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = FlowStateChangeSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/MirrorStatus" => { - #[allow(non_camel_case_types)] - struct MirrorStatusSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for MirrorStatusSvc { - type Response = super::MirrorStatusResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).mirror_status(request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = MirrorStatusSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/peerdb_route.FlowService/GetVersion" => { - #[allow(non_camel_case_types)] - struct GetVersionSvc(pub Arc); - impl< - T: FlowService, - > tonic::server::UnaryService - for GetVersionSvc { - type Response = super::PeerDbVersionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { (*inner).get_version(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetVersionSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); 
- let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } - } - } - } - impl Clone for FlowServiceServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for FlowServiceServer { - const NAME: &'static str = "peerdb_route.FlowService"; - } -} diff --git a/ui/grpc_generated/flow.ts b/ui/grpc_generated/flow.ts deleted file mode 100644 index 0e8a709f8c..0000000000 --- a/ui/grpc_generated/flow.ts +++ /dev/null @@ -1,6730 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Timestamp } from "./google/protobuf/timestamp"; -import { Peer } from "./peers"; - -export const protobufPackage = "peerdb_flow"; - -/** protos for qrep */ -export enum QRepSyncMode { - QREP_SYNC_MODE_MULTI_INSERT = 0, - QREP_SYNC_MODE_STORAGE_AVRO = 1, - UNRECOGNIZED = -1, -} - -export function qRepSyncModeFromJSON(object: any): QRepSyncMode { - switch (object) { - case 0: - case "QREP_SYNC_MODE_MULTI_INSERT": - return QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT; - case 1: - case "QREP_SYNC_MODE_STORAGE_AVRO": - return QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO; - case -1: - case "UNRECOGNIZED": - default: - return QRepSyncMode.UNRECOGNIZED; - } -} - -export function qRepSyncModeToJSON(object: QRepSyncMode): string { - switch (object) { - case QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT: - return "QREP_SYNC_MODE_MULTI_INSERT"; - case QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO: - return "QREP_SYNC_MODE_STORAGE_AVRO"; - case QRepSyncMode.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export enum QRepWriteType { - QREP_WRITE_MODE_APPEND = 0, - QREP_WRITE_MODE_UPSERT = 1, - /** QREP_WRITE_MODE_OVERWRITE - only valid when initial_copy_true is set to true. TRUNCATES tables before reverting to APPEND. 
*/ - QREP_WRITE_MODE_OVERWRITE = 2, - UNRECOGNIZED = -1, -} - -export function qRepWriteTypeFromJSON(object: any): QRepWriteType { - switch (object) { - case 0: - case "QREP_WRITE_MODE_APPEND": - return QRepWriteType.QREP_WRITE_MODE_APPEND; - case 1: - case "QREP_WRITE_MODE_UPSERT": - return QRepWriteType.QREP_WRITE_MODE_UPSERT; - case 2: - case "QREP_WRITE_MODE_OVERWRITE": - return QRepWriteType.QREP_WRITE_MODE_OVERWRITE; - case -1: - case "UNRECOGNIZED": - default: - return QRepWriteType.UNRECOGNIZED; - } -} - -export function qRepWriteTypeToJSON(object: QRepWriteType): string { - switch (object) { - case QRepWriteType.QREP_WRITE_MODE_APPEND: - return "QREP_WRITE_MODE_APPEND"; - case QRepWriteType.QREP_WRITE_MODE_UPSERT: - return "QREP_WRITE_MODE_UPSERT"; - case QRepWriteType.QREP_WRITE_MODE_OVERWRITE: - return "QREP_WRITE_MODE_OVERWRITE"; - case QRepWriteType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface TableNameMapping { - sourceTableName: string; - destinationTableName: string; -} - -export interface RelationMessageColumn { - flags: number; - name: string; - dataType: number; -} - -export interface RelationMessage { - relationId: number; - relationName: string; - columns: RelationMessageColumn[]; -} - -export interface TableMapping { - sourceTableIdentifier: string; - destinationTableIdentifier: string; - partitionKey: string; - exclude: string[]; -} - -export interface SetupInput { - peer: Peer | undefined; - flowName: string; -} - -export interface FlowConnectionConfigs { - source: Peer | undefined; - destination: Peer | undefined; - flowJobName: string; - tableSchema: TableSchema | undefined; - tableMappings: TableMapping[]; - srcTableIdNameMapping: { [key: number]: string }; - tableNameSchemaMapping: { [key: string]: TableSchema }; - /** - * This is an optional peer that will be used to hold metadata in cases where - * the destination isn't ideal for holding metadata. 
- */ - metadataPeer: Peer | undefined; - maxBatchSize: number; - doInitialCopy: boolean; - publicationName: string; - snapshotNumRowsPerPartition: number; - /** max parallel workers is per table */ - snapshotMaxParallelWorkers: number; - snapshotNumTablesInParallel: number; - snapshotSyncMode: QRepSyncMode; - cdcSyncMode: QRepSyncMode; - snapshotStagingPath: string; - cdcStagingPath: string; - /** currently only works for snowflake */ - softDelete: boolean; - replicationSlotName: string; - /** the below two are for eventhub only */ - pushBatchSize: number; - pushParallelism: number; - /** - * if true, then the flow will be resynced - * create new tables with "_resync" suffix, perform initial load and then swap the new tables with the old ones - * to be used after the old mirror is dropped - */ - resync: boolean; - softDeleteColName: string; - syncedAtColName: string; - initialCopyOnly: boolean; -} - -export interface FlowConnectionConfigs_SrcTableIdNameMappingEntry { - key: number; - value: string; -} - -export interface FlowConnectionConfigs_TableNameSchemaMappingEntry { - key: string; - value: TableSchema | undefined; -} - -export interface RenameTableOption { - currentName: string; - newName: string; - tableSchema: TableSchema | undefined; -} - -export interface RenameTablesInput { - flowJobName: string; - peer: Peer | undefined; - renameTableOptions: RenameTableOption[]; - softDeleteColName?: string | undefined; - syncedAtColName?: string | undefined; -} - -export interface RenameTablesOutput { - flowJobName: string; -} - -export interface CreateTablesFromExistingInput { - flowJobName: string; - peer: Peer | undefined; - newToExistingTableMapping: { [key: string]: string }; -} - -export interface CreateTablesFromExistingInput_NewToExistingTableMappingEntry { - key: string; - value: string; -} - -export interface CreateTablesFromExistingOutput { - flowJobName: string; -} - -export interface SyncFlowOptions { - batchSize: number; - relationMessageMapping: { [key: number]: RelationMessage }; -} - -export interface SyncFlowOptions_RelationMessageMappingEntry { - key: number; - value: RelationMessage | undefined; -} - -export interface NormalizeFlowOptions { - batchSize: number; -} - -export interface LastSyncState { - checkpoint: number; - lastSyncedAt: Date | undefined; -} - -export interface StartFlowInput { - lastSyncState: LastSyncState | undefined; - flowConnectionConfigs: FlowConnectionConfigs | undefined; - syncFlowOptions: SyncFlowOptions | undefined; - relationMessageMapping: { [key: number]: RelationMessage }; -} - -export interface StartFlowInput_RelationMessageMappingEntry { - key: number; - value: RelationMessage | undefined; -} - -export interface StartNormalizeInput { - flowConnectionConfigs: FlowConnectionConfigs | undefined; -} - -export interface GetLastSyncedIDInput { - peerConnectionConfig: Peer | undefined; - flowJobName: string; -} - -export interface EnsurePullabilityInput { - peerConnectionConfig: Peer | undefined; - flowJobName: string; - sourceTableIdentifier: string; -} - -export interface EnsurePullabilityBatchInput { - peerConnectionConfig: Peer | undefined; - flowJobName: string; - sourceTableIdentifiers: string[]; -} - -export interface PostgresTableIdentifier { - relId: number; -} - -export interface TableIdentifier { - postgresTableIdentifier?: PostgresTableIdentifier | undefined; -} - -export interface EnsurePullabilityOutput { - tableIdentifier: TableIdentifier | undefined; -} - -export interface EnsurePullabilityBatchOutput { - tableIdentifierMapping: { 
[key: string]: TableIdentifier }; -} - -export interface EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - key: string; - value: TableIdentifier | undefined; -} - -export interface SetupReplicationInput { - peerConnectionConfig: Peer | undefined; - flowJobName: string; - tableNameMapping: { [key: string]: string }; - /** replicate to destination using ctid */ - destinationPeer: Peer | undefined; - doInitialCopy: boolean; - existingPublicationName: string; - existingReplicationSlotName: string; -} - -export interface SetupReplicationInput_TableNameMappingEntry { - key: string; - value: string; -} - -export interface SetupReplicationOutput { - slotName: string; - snapshotName: string; -} - -export interface CreateRawTableInput { - peerConnectionConfig: Peer | undefined; - flowJobName: string; - tableNameMapping: { [key: string]: string }; - cdcSyncMode: QRepSyncMode; -} - -export interface CreateRawTableInput_TableNameMappingEntry { - key: string; - value: string; -} - -export interface CreateRawTableOutput { - tableIdentifier: string; -} - -export interface TableSchema { - tableIdentifier: string; - /** DEPRECATED: eliminate when breaking changes are allowed. */ - columns: { [key: string]: string }; - primaryKeyColumns: string[]; - isReplicaIdentityFull: boolean; - columnNames: string[]; - columnTypes: string[]; -} - -export interface TableSchema_ColumnsEntry { - key: string; - value: string; -} - -export interface GetTableSchemaBatchInput { - peerConnectionConfig: Peer | undefined; - tableIdentifiers: string[]; - flowName: string; - skipPkeyAndReplicaCheck: boolean; -} - -export interface GetTableSchemaBatchOutput { - tableNameSchemaMapping: { [key: string]: TableSchema }; -} - -export interface GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - key: string; - value: TableSchema | undefined; -} - -export interface SetupNormalizedTableInput { - peerConnectionConfig: Peer | undefined; - tableIdentifier: string; - sourceTableSchema: TableSchema | undefined; -} - -export interface SetupNormalizedTableBatchInput { - peerConnectionConfig: Peer | undefined; - tableNameSchemaMapping: { [key: string]: TableSchema }; - /** migration related columns */ - softDeleteColName: string; - syncedAtColName: string; - flowName: string; -} - -export interface SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - key: string; - value: TableSchema | undefined; -} - -export interface SetupNormalizedTableOutput { - tableIdentifier: string; - alreadyExists: boolean; -} - -export interface SetupNormalizedTableBatchOutput { - tableExistsMapping: { [key: string]: boolean }; -} - -export interface SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - key: string; - value: boolean; -} - -/** partition ranges [start, end] inclusive */ -export interface IntPartitionRange { - start: number; - end: number; -} - -export interface TimestampPartitionRange { - start: Date | undefined; - end: Date | undefined; -} - -export interface TID { - blockNumber: number; - offsetNumber: number; -} - -export interface TIDPartitionRange { - start: TID | undefined; - end: TID | undefined; -} - -export interface PartitionRange { - intRange?: IntPartitionRange | undefined; - timestampRange?: TimestampPartitionRange | undefined; - tidRange?: TIDPartitionRange | undefined; -} - -export interface QRepWriteMode { - writeType: QRepWriteType; - upsertKeyColumns: string[]; -} - -export interface QRepConfig { - flowJobName: string; - sourcePeer: Peer | undefined; - destinationPeer: Peer | undefined; - 
destinationTableIdentifier: string;
-  query: string;
-  watermarkTable: string;
-  watermarkColumn: string;
-  initialCopyOnly: boolean;
-  syncMode: QRepSyncMode;
-  /** DEPRECATED: eliminate when breaking changes are allowed. */
-  batchSizeInt: number;
-  /** DEPRECATED: eliminate when breaking changes are allowed. */
-  batchDurationSeconds: number;
-  maxParallelWorkers: number;
-  /** time to wait between getting partitions to process */
-  waitBetweenBatchesSeconds: number;
-  writeMode:
-    | QRepWriteMode
-    | undefined;
-  /**
-   * This is only used when sync_mode is AVRO
-   * this is the location where the avro files will be written
-   * if this starts with gs:// then it will be written to GCS
-   * if this starts with s3:// then it will be written to S3, only supported in Snowflake
-   * if nothing is specified then it will be written to local disk
-   * if using GCS or S3 make sure your instance has the correct permissions.
-   */
-  stagingPath: string;
-  /**
-   * This setting overrides batch_size_int and batch_duration_seconds
-   * and instead uses the number of rows per partition to determine
-   * how many rows to process per batch.
-   */
-  numRowsPerPartition: number;
-  /** Creates the watermark table on the destination as-is, can be used for some queries. */
-  setupWatermarkTableOnDestination: boolean;
-  /**
-   * create new tables with "_peerdb_resync" suffix, perform initial load and then swap the new table with the old ones
-   * to be used after the old mirror is dropped
-   */
-  dstTableFullResync: boolean;
-  syncedAtColName: string;
-  softDeleteColName: string;
-}
-
-export interface QRepPartition {
-  partitionId: string;
-  range: PartitionRange | undefined;
-  fullTablePartition: boolean;
-}
-
-export interface QRepPartitionBatch {
-  batchId: number;
-  partitions: QRepPartition[];
-}
-
-export interface QRepParitionResult {
-  partitions: QRepPartition[];
-}
-
-export interface DropFlowInput {
-  flowName: string;
-}
-
-export interface DeltaAddedColumn {
-  columnName: string;
-  columnType: string;
-}
-
-export interface TableSchemaDelta {
-  srcTableName: string;
-  dstTableName: string;
-  addedColumns: DeltaAddedColumn[];
-}
-
-export interface ReplayTableSchemaDeltaInput {
-  flowConnectionConfigs: FlowConnectionConfigs | undefined;
-  tableSchemaDeltas: TableSchemaDelta[];
-}
-
-export interface QRepFlowState {
-  lastPartition: QRepPartition | undefined;
-  numPartitionsProcessed: number;
-  needsResync: boolean;
-  disableWaitForNewRows: boolean;
-}
-
-export interface PeerDBColumns {
-  softDeleteColName: string;
-  syncedAtColName: string;
-  softDelete: boolean;
-}
-
-export interface GetOpenConnectionsForUserResult {
-  userName: string;
-  currentOpenConnections: number;
-}
[… the hunk continues by deleting the ts-proto-generated codec objects (createBase*, encode, decode, fromJSON, toJSON, create, fromPartial) for TableNameMapping, RelationMessageColumn, RelationMessage, TableMapping, SetupInput, FlowConnectionConfigs and its SrcTableIdNameMapping/TableNameSchemaMapping entries, RenameTableOption, RenameTablesInput, RenameTablesOutput, CreateTablesFromExistingInput and its NewToExistingTableMapping entry, CreateTablesFromExistingOutput, SyncFlowOptions and its RelationMessageMapping entry, NormalizeFlowOptions, LastSyncState, StartFlowInput and its RelationMessageMapping entry, StartNormalizeInput, GetLastSyncedIDInput, and EnsurePullabilityInput …]
""; - return message; - }, -}; - -function createBaseEnsurePullabilityBatchInput(): EnsurePullabilityBatchInput { - return { peerConnectionConfig: undefined, flowJobName: "", sourceTableIdentifiers: [] }; -} - -export const EnsurePullabilityBatchInput = { - encode(message: EnsurePullabilityBatchInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - if (message.flowJobName !== "") { - writer.uint32(18).string(message.flowJobName); - } - for (const v of message.sourceTableIdentifiers) { - writer.uint32(26).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnsurePullabilityBatchInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnsurePullabilityBatchInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.flowJobName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.sourceTableIdentifiers.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnsurePullabilityBatchInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "", - sourceTableIdentifiers: Array.isArray(object?.sourceTableIdentifiers) - ? object.sourceTableIdentifiers.map((e: any) => String(e)) - : [], - }; - }, - - toJSON(message: EnsurePullabilityBatchInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - if (message.sourceTableIdentifiers?.length) { - obj.sourceTableIdentifiers = message.sourceTableIdentifiers; - } - return obj; - }, - - create, I>>(base?: I): EnsurePullabilityBatchInput { - return EnsurePullabilityBatchInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EnsurePullabilityBatchInput { - const message = createBaseEnsurePullabilityBatchInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.flowJobName = object.flowJobName ?? ""; - message.sourceTableIdentifiers = object.sourceTableIdentifiers?.map((e) => e) || []; - return message; - }, -}; - -function createBasePostgresTableIdentifier(): PostgresTableIdentifier { - return { relId: 0 }; -} - -export const PostgresTableIdentifier = { - encode(message: PostgresTableIdentifier, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.relId !== 0) { - writer.uint32(8).uint32(message.relId); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PostgresTableIdentifier { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBasePostgresTableIdentifier(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.relId = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PostgresTableIdentifier { - return { relId: isSet(object.relId) ? Number(object.relId) : 0 }; - }, - - toJSON(message: PostgresTableIdentifier): unknown { - const obj: any = {}; - if (message.relId !== 0) { - obj.relId = Math.round(message.relId); - } - return obj; - }, - - create, I>>(base?: I): PostgresTableIdentifier { - return PostgresTableIdentifier.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PostgresTableIdentifier { - const message = createBasePostgresTableIdentifier(); - message.relId = object.relId ?? 0; - return message; - }, -}; - -function createBaseTableIdentifier(): TableIdentifier { - return { postgresTableIdentifier: undefined }; -} - -export const TableIdentifier = { - encode(message: TableIdentifier, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.postgresTableIdentifier !== undefined) { - PostgresTableIdentifier.encode(message.postgresTableIdentifier, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableIdentifier { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableIdentifier(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.postgresTableIdentifier = PostgresTableIdentifier.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableIdentifier { - return { - postgresTableIdentifier: isSet(object.postgresTableIdentifier) - ? PostgresTableIdentifier.fromJSON(object.postgresTableIdentifier) - : undefined, - }; - }, - - toJSON(message: TableIdentifier): unknown { - const obj: any = {}; - if (message.postgresTableIdentifier !== undefined) { - obj.postgresTableIdentifier = PostgresTableIdentifier.toJSON(message.postgresTableIdentifier); - } - return obj; - }, - - create, I>>(base?: I): TableIdentifier { - return TableIdentifier.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableIdentifier { - const message = createBaseTableIdentifier(); - message.postgresTableIdentifier = - (object.postgresTableIdentifier !== undefined && object.postgresTableIdentifier !== null) - ? PostgresTableIdentifier.fromPartial(object.postgresTableIdentifier) - : undefined; - return message; - }, -}; - -function createBaseEnsurePullabilityOutput(): EnsurePullabilityOutput { - return { tableIdentifier: undefined }; -} - -export const EnsurePullabilityOutput = { - encode(message: EnsurePullabilityOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.tableIdentifier !== undefined) { - TableIdentifier.encode(message.tableIdentifier, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnsurePullabilityOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseEnsurePullabilityOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tableIdentifier = TableIdentifier.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnsurePullabilityOutput { - return { - tableIdentifier: isSet(object.tableIdentifier) ? TableIdentifier.fromJSON(object.tableIdentifier) : undefined, - }; - }, - - toJSON(message: EnsurePullabilityOutput): unknown { - const obj: any = {}; - if (message.tableIdentifier !== undefined) { - obj.tableIdentifier = TableIdentifier.toJSON(message.tableIdentifier); - } - return obj; - }, - - create, I>>(base?: I): EnsurePullabilityOutput { - return EnsurePullabilityOutput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EnsurePullabilityOutput { - const message = createBaseEnsurePullabilityOutput(); - message.tableIdentifier = (object.tableIdentifier !== undefined && object.tableIdentifier !== null) - ? TableIdentifier.fromPartial(object.tableIdentifier) - : undefined; - return message; - }, -}; - -function createBaseEnsurePullabilityBatchOutput(): EnsurePullabilityBatchOutput { - return { tableIdentifierMapping: {} }; -} - -export const EnsurePullabilityBatchOutput = { - encode(message: EnsurePullabilityBatchOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - Object.entries(message.tableIdentifierMapping).forEach(([key, value]) => { - EnsurePullabilityBatchOutput_TableIdentifierMappingEntry.encode( - { key: key as any, value }, - writer.uint32(10).fork(), - ).ldelim(); - }); - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnsurePullabilityBatchOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnsurePullabilityBatchOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - const entry1 = EnsurePullabilityBatchOutput_TableIdentifierMappingEntry.decode(reader, reader.uint32()); - if (entry1.value !== undefined) { - message.tableIdentifierMapping[entry1.key] = entry1.value; - } - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnsurePullabilityBatchOutput { - return { - tableIdentifierMapping: isObject(object.tableIdentifierMapping) - ? Object.entries(object.tableIdentifierMapping).reduce<{ [key: string]: TableIdentifier }>( - (acc, [key, value]) => { - acc[key] = TableIdentifier.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - }; - }, - - toJSON(message: EnsurePullabilityBatchOutput): unknown { - const obj: any = {}; - if (message.tableIdentifierMapping) { - const entries = Object.entries(message.tableIdentifierMapping); - if (entries.length > 0) { - obj.tableIdentifierMapping = {}; - entries.forEach(([k, v]) => { - obj.tableIdentifierMapping[k] = TableIdentifier.toJSON(v); - }); - } - } - return obj; - }, - - create, I>>(base?: I): EnsurePullabilityBatchOutput { - return EnsurePullabilityBatchOutput.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): EnsurePullabilityBatchOutput { - const message = createBaseEnsurePullabilityBatchOutput(); - message.tableIdentifierMapping = Object.entries(object.tableIdentifierMapping ?? {}).reduce< - { [key: string]: TableIdentifier } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = TableIdentifier.fromPartial(value); - } - return acc; - }, {}); - return message; - }, -}; - -function createBaseEnsurePullabilityBatchOutput_TableIdentifierMappingEntry(): EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - return { key: "", value: undefined }; -} - -export const EnsurePullabilityBatchOutput_TableIdentifierMappingEntry = { - encode( - message: EnsurePullabilityBatchOutput_TableIdentifierMappingEntry, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== undefined) { - TableIdentifier.encode(message.value, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnsurePullabilityBatchOutput_TableIdentifierMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = TableIdentifier.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - return { - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? TableIdentifier.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: EnsurePullabilityBatchOutput_TableIdentifierMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== undefined) { - obj.value = TableIdentifier.toJSON(message.value); - } - return obj; - }, - - create, I>>( - base?: I, - ): EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - return EnsurePullabilityBatchOutput_TableIdentifierMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): EnsurePullabilityBatchOutput_TableIdentifierMappingEntry { - const message = createBaseEnsurePullabilityBatchOutput_TableIdentifierMappingEntry(); - message.key = object.key ?? ""; - message.value = (object.value !== undefined && object.value !== null) - ? 
TableIdentifier.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseSetupReplicationInput(): SetupReplicationInput { - return { - peerConnectionConfig: undefined, - flowJobName: "", - tableNameMapping: {}, - destinationPeer: undefined, - doInitialCopy: false, - existingPublicationName: "", - existingReplicationSlotName: "", - }; -} - -export const SetupReplicationInput = { - encode(message: SetupReplicationInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - if (message.flowJobName !== "") { - writer.uint32(18).string(message.flowJobName); - } - Object.entries(message.tableNameMapping).forEach(([key, value]) => { - SetupReplicationInput_TableNameMappingEntry.encode({ key: key as any, value }, writer.uint32(26).fork()).ldelim(); - }); - if (message.destinationPeer !== undefined) { - Peer.encode(message.destinationPeer, writer.uint32(34).fork()).ldelim(); - } - if (message.doInitialCopy === true) { - writer.uint32(40).bool(message.doInitialCopy); - } - if (message.existingPublicationName !== "") { - writer.uint32(50).string(message.existingPublicationName); - } - if (message.existingReplicationSlotName !== "") { - writer.uint32(58).string(message.existingReplicationSlotName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupReplicationInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupReplicationInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.flowJobName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - const entry3 = SetupReplicationInput_TableNameMappingEntry.decode(reader, reader.uint32()); - if (entry3.value !== undefined) { - message.tableNameMapping[entry3.key] = entry3.value; - } - continue; - case 4: - if (tag !== 34) { - break; - } - - message.destinationPeer = Peer.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.doInitialCopy = reader.bool(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.existingPublicationName = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.existingReplicationSlotName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupReplicationInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "", - tableNameMapping: isObject(object.tableNameMapping) - ? Object.entries(object.tableNameMapping).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - destinationPeer: isSet(object.destinationPeer) ? Peer.fromJSON(object.destinationPeer) : undefined, - doInitialCopy: isSet(object.doInitialCopy) ? Boolean(object.doInitialCopy) : false, - existingPublicationName: isSet(object.existingPublicationName) ? 
String(object.existingPublicationName) : "", - existingReplicationSlotName: isSet(object.existingReplicationSlotName) - ? String(object.existingReplicationSlotName) - : "", - }; - }, - - toJSON(message: SetupReplicationInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - if (message.tableNameMapping) { - const entries = Object.entries(message.tableNameMapping); - if (entries.length > 0) { - obj.tableNameMapping = {}; - entries.forEach(([k, v]) => { - obj.tableNameMapping[k] = v; - }); - } - } - if (message.destinationPeer !== undefined) { - obj.destinationPeer = Peer.toJSON(message.destinationPeer); - } - if (message.doInitialCopy === true) { - obj.doInitialCopy = message.doInitialCopy; - } - if (message.existingPublicationName !== "") { - obj.existingPublicationName = message.existingPublicationName; - } - if (message.existingReplicationSlotName !== "") { - obj.existingReplicationSlotName = message.existingReplicationSlotName; - } - return obj; - }, - - create, I>>(base?: I): SetupReplicationInput { - return SetupReplicationInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SetupReplicationInput { - const message = createBaseSetupReplicationInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.flowJobName = object.flowJobName ?? ""; - message.tableNameMapping = Object.entries(object.tableNameMapping ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.destinationPeer = (object.destinationPeer !== undefined && object.destinationPeer !== null) - ? Peer.fromPartial(object.destinationPeer) - : undefined; - message.doInitialCopy = object.doInitialCopy ?? false; - message.existingPublicationName = object.existingPublicationName ?? ""; - message.existingReplicationSlotName = object.existingReplicationSlotName ?? ""; - return message; - }, -}; - -function createBaseSetupReplicationInput_TableNameMappingEntry(): SetupReplicationInput_TableNameMappingEntry { - return { key: "", value: "" }; -} - -export const SetupReplicationInput_TableNameMappingEntry = { - encode(message: SetupReplicationInput_TableNameMappingEntry, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupReplicationInput_TableNameMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSetupReplicationInput_TableNameMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupReplicationInput_TableNameMappingEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: SetupReplicationInput_TableNameMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create, I>>( - base?: I, - ): SetupReplicationInput_TableNameMappingEntry { - return SetupReplicationInput_TableNameMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): SetupReplicationInput_TableNameMappingEntry { - const message = createBaseSetupReplicationInput_TableNameMappingEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSetupReplicationOutput(): SetupReplicationOutput { - return { slotName: "", snapshotName: "" }; -} - -export const SetupReplicationOutput = { - encode(message: SetupReplicationOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.slotName !== "") { - writer.uint32(10).string(message.slotName); - } - if (message.snapshotName !== "") { - writer.uint32(18).string(message.snapshotName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupReplicationOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupReplicationOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.slotName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.snapshotName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupReplicationOutput { - return { - slotName: isSet(object.slotName) ? String(object.slotName) : "", - snapshotName: isSet(object.snapshotName) ? String(object.snapshotName) : "", - }; - }, - - toJSON(message: SetupReplicationOutput): unknown { - const obj: any = {}; - if (message.slotName !== "") { - obj.slotName = message.slotName; - } - if (message.snapshotName !== "") { - obj.snapshotName = message.snapshotName; - } - return obj; - }, - - create, I>>(base?: I): SetupReplicationOutput { - return SetupReplicationOutput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SetupReplicationOutput { - const message = createBaseSetupReplicationOutput(); - message.slotName = object.slotName ?? ""; - message.snapshotName = object.snapshotName ?? 
""; - return message; - }, -}; - -function createBaseCreateRawTableInput(): CreateRawTableInput { - return { peerConnectionConfig: undefined, flowJobName: "", tableNameMapping: {}, cdcSyncMode: 0 }; -} - -export const CreateRawTableInput = { - encode(message: CreateRawTableInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - if (message.flowJobName !== "") { - writer.uint32(18).string(message.flowJobName); - } - Object.entries(message.tableNameMapping).forEach(([key, value]) => { - CreateRawTableInput_TableNameMappingEntry.encode({ key: key as any, value }, writer.uint32(26).fork()).ldelim(); - }); - if (message.cdcSyncMode !== 0) { - writer.uint32(32).int32(message.cdcSyncMode); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateRawTableInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateRawTableInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.flowJobName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - const entry3 = CreateRawTableInput_TableNameMappingEntry.decode(reader, reader.uint32()); - if (entry3.value !== undefined) { - message.tableNameMapping[entry3.key] = entry3.value; - } - continue; - case 4: - if (tag !== 32) { - break; - } - - message.cdcSyncMode = reader.int32() as any; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateRawTableInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "", - tableNameMapping: isObject(object.tableNameMapping) - ? Object.entries(object.tableNameMapping).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - cdcSyncMode: isSet(object.cdcSyncMode) ? qRepSyncModeFromJSON(object.cdcSyncMode) : 0, - }; - }, - - toJSON(message: CreateRawTableInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - if (message.tableNameMapping) { - const entries = Object.entries(message.tableNameMapping); - if (entries.length > 0) { - obj.tableNameMapping = {}; - entries.forEach(([k, v]) => { - obj.tableNameMapping[k] = v; - }); - } - } - if (message.cdcSyncMode !== 0) { - obj.cdcSyncMode = qRepSyncModeToJSON(message.cdcSyncMode); - } - return obj; - }, - - create, I>>(base?: I): CreateRawTableInput { - return CreateRawTableInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateRawTableInput { - const message = createBaseCreateRawTableInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? 
Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.flowJobName = object.flowJobName ?? ""; - message.tableNameMapping = Object.entries(object.tableNameMapping ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.cdcSyncMode = object.cdcSyncMode ?? 0; - return message; - }, -}; - -function createBaseCreateRawTableInput_TableNameMappingEntry(): CreateRawTableInput_TableNameMappingEntry { - return { key: "", value: "" }; -} - -export const CreateRawTableInput_TableNameMappingEntry = { - encode(message: CreateRawTableInput_TableNameMappingEntry, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateRawTableInput_TableNameMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateRawTableInput_TableNameMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateRawTableInput_TableNameMappingEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: CreateRawTableInput_TableNameMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create, I>>( - base?: I, - ): CreateRawTableInput_TableNameMappingEntry { - return CreateRawTableInput_TableNameMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): CreateRawTableInput_TableNameMappingEntry { - const message = createBaseCreateRawTableInput_TableNameMappingEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseCreateRawTableOutput(): CreateRawTableOutput { - return { tableIdentifier: "" }; -} - -export const CreateRawTableOutput = { - encode(message: CreateRawTableOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.tableIdentifier !== "") { - writer.uint32(10).string(message.tableIdentifier); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateRawTableOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateRawTableOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tableIdentifier = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateRawTableOutput { - return { tableIdentifier: isSet(object.tableIdentifier) ? 
String(object.tableIdentifier) : "" }; - }, - - toJSON(message: CreateRawTableOutput): unknown { - const obj: any = {}; - if (message.tableIdentifier !== "") { - obj.tableIdentifier = message.tableIdentifier; - } - return obj; - }, - - create, I>>(base?: I): CreateRawTableOutput { - return CreateRawTableOutput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateRawTableOutput { - const message = createBaseCreateRawTableOutput(); - message.tableIdentifier = object.tableIdentifier ?? ""; - return message; - }, -}; - -function createBaseTableSchema(): TableSchema { - return { - tableIdentifier: "", - columns: {}, - primaryKeyColumns: [], - isReplicaIdentityFull: false, - columnNames: [], - columnTypes: [], - }; -} - -export const TableSchema = { - encode(message: TableSchema, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.tableIdentifier !== "") { - writer.uint32(10).string(message.tableIdentifier); - } - Object.entries(message.columns).forEach(([key, value]) => { - TableSchema_ColumnsEntry.encode({ key: key as any, value }, writer.uint32(18).fork()).ldelim(); - }); - for (const v of message.primaryKeyColumns) { - writer.uint32(26).string(v!); - } - if (message.isReplicaIdentityFull === true) { - writer.uint32(32).bool(message.isReplicaIdentityFull); - } - for (const v of message.columnNames) { - writer.uint32(42).string(v!); - } - for (const v of message.columnTypes) { - writer.uint32(50).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableSchema { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableSchema(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tableIdentifier = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - const entry2 = TableSchema_ColumnsEntry.decode(reader, reader.uint32()); - if (entry2.value !== undefined) { - message.columns[entry2.key] = entry2.value; - } - continue; - case 3: - if (tag !== 26) { - break; - } - - message.primaryKeyColumns.push(reader.string()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.isReplicaIdentityFull = reader.bool(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.columnNames.push(reader.string()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.columnTypes.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableSchema { - return { - tableIdentifier: isSet(object.tableIdentifier) ? String(object.tableIdentifier) : "", - columns: isObject(object.columns) - ? Object.entries(object.columns).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - primaryKeyColumns: Array.isArray(object?.primaryKeyColumns) - ? object.primaryKeyColumns.map((e: any) => String(e)) - : [], - isReplicaIdentityFull: isSet(object.isReplicaIdentityFull) ? Boolean(object.isReplicaIdentityFull) : false, - columnNames: Array.isArray(object?.columnNames) ? object.columnNames.map((e: any) => String(e)) : [], - columnTypes: Array.isArray(object?.columnTypes) ? 
object.columnTypes.map((e: any) => String(e)) : [], - }; - }, - - toJSON(message: TableSchema): unknown { - const obj: any = {}; - if (message.tableIdentifier !== "") { - obj.tableIdentifier = message.tableIdentifier; - } - if (message.columns) { - const entries = Object.entries(message.columns); - if (entries.length > 0) { - obj.columns = {}; - entries.forEach(([k, v]) => { - obj.columns[k] = v; - }); - } - } - if (message.primaryKeyColumns?.length) { - obj.primaryKeyColumns = message.primaryKeyColumns; - } - if (message.isReplicaIdentityFull === true) { - obj.isReplicaIdentityFull = message.isReplicaIdentityFull; - } - if (message.columnNames?.length) { - obj.columnNames = message.columnNames; - } - if (message.columnTypes?.length) { - obj.columnTypes = message.columnTypes; - } - return obj; - }, - - create, I>>(base?: I): TableSchema { - return TableSchema.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableSchema { - const message = createBaseTableSchema(); - message.tableIdentifier = object.tableIdentifier ?? ""; - message.columns = Object.entries(object.columns ?? {}).reduce<{ [key: string]: string }>((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, {}); - message.primaryKeyColumns = object.primaryKeyColumns?.map((e) => e) || []; - message.isReplicaIdentityFull = object.isReplicaIdentityFull ?? false; - message.columnNames = object.columnNames?.map((e) => e) || []; - message.columnTypes = object.columnTypes?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTableSchema_ColumnsEntry(): TableSchema_ColumnsEntry { - return { key: "", value: "" }; -} - -export const TableSchema_ColumnsEntry = { - encode(message: TableSchema_ColumnsEntry, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableSchema_ColumnsEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableSchema_ColumnsEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableSchema_ColumnsEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: TableSchema_ColumnsEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create, I>>(base?: I): TableSchema_ColumnsEntry { - return TableSchema_ColumnsEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableSchema_ColumnsEntry { - const message = createBaseTableSchema_ColumnsEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? 
""; - return message; - }, -}; - -function createBaseGetTableSchemaBatchInput(): GetTableSchemaBatchInput { - return { peerConnectionConfig: undefined, tableIdentifiers: [], flowName: "", skipPkeyAndReplicaCheck: false }; -} - -export const GetTableSchemaBatchInput = { - encode(message: GetTableSchemaBatchInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.tableIdentifiers) { - writer.uint32(18).string(v!); - } - if (message.flowName !== "") { - writer.uint32(26).string(message.flowName); - } - if (message.skipPkeyAndReplicaCheck === true) { - writer.uint32(32).bool(message.skipPkeyAndReplicaCheck); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GetTableSchemaBatchInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGetTableSchemaBatchInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.tableIdentifiers.push(reader.string()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.flowName = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.skipPkeyAndReplicaCheck = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GetTableSchemaBatchInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - tableIdentifiers: Array.isArray(object?.tableIdentifiers) - ? object.tableIdentifiers.map((e: any) => String(e)) - : [], - flowName: isSet(object.flowName) ? String(object.flowName) : "", - skipPkeyAndReplicaCheck: isSet(object.skipPkeyAndReplicaCheck) ? Boolean(object.skipPkeyAndReplicaCheck) : false, - }; - }, - - toJSON(message: GetTableSchemaBatchInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.tableIdentifiers?.length) { - obj.tableIdentifiers = message.tableIdentifiers; - } - if (message.flowName !== "") { - obj.flowName = message.flowName; - } - if (message.skipPkeyAndReplicaCheck === true) { - obj.skipPkeyAndReplicaCheck = message.skipPkeyAndReplicaCheck; - } - return obj; - }, - - create, I>>(base?: I): GetTableSchemaBatchInput { - return GetTableSchemaBatchInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): GetTableSchemaBatchInput { - const message = createBaseGetTableSchemaBatchInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.tableIdentifiers = object.tableIdentifiers?.map((e) => e) || []; - message.flowName = object.flowName ?? ""; - message.skipPkeyAndReplicaCheck = object.skipPkeyAndReplicaCheck ?? 
false; - return message; - }, -}; - -function createBaseGetTableSchemaBatchOutput(): GetTableSchemaBatchOutput { - return { tableNameSchemaMapping: {} }; -} - -export const GetTableSchemaBatchOutput = { - encode(message: GetTableSchemaBatchOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - Object.entries(message.tableNameSchemaMapping).forEach(([key, value]) => { - GetTableSchemaBatchOutput_TableNameSchemaMappingEntry.encode({ key: key as any, value }, writer.uint32(10).fork()) - .ldelim(); - }); - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GetTableSchemaBatchOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGetTableSchemaBatchOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - const entry1 = GetTableSchemaBatchOutput_TableNameSchemaMappingEntry.decode(reader, reader.uint32()); - if (entry1.value !== undefined) { - message.tableNameSchemaMapping[entry1.key] = entry1.value; - } - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GetTableSchemaBatchOutput { - return { - tableNameSchemaMapping: isObject(object.tableNameSchemaMapping) - ? Object.entries(object.tableNameSchemaMapping).reduce<{ [key: string]: TableSchema }>((acc, [key, value]) => { - acc[key] = TableSchema.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: GetTableSchemaBatchOutput): unknown { - const obj: any = {}; - if (message.tableNameSchemaMapping) { - const entries = Object.entries(message.tableNameSchemaMapping); - if (entries.length > 0) { - obj.tableNameSchemaMapping = {}; - entries.forEach(([k, v]) => { - obj.tableNameSchemaMapping[k] = TableSchema.toJSON(v); - }); - } - } - return obj; - }, - - create, I>>(base?: I): GetTableSchemaBatchOutput { - return GetTableSchemaBatchOutput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): GetTableSchemaBatchOutput { - const message = createBaseGetTableSchemaBatchOutput(); - message.tableNameSchemaMapping = Object.entries(object.tableNameSchemaMapping ?? {}).reduce< - { [key: string]: TableSchema } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = TableSchema.fromPartial(value); - } - return acc; - }, {}); - return message; - }, -}; - -function createBaseGetTableSchemaBatchOutput_TableNameSchemaMappingEntry(): GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - return { key: "", value: undefined }; -} - -export const GetTableSchemaBatchOutput_TableNameSchemaMappingEntry = { - encode( - message: GetTableSchemaBatchOutput_TableNameSchemaMappingEntry, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== undefined) { - TableSchema.encode(message.value, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGetTableSchemaBatchOutput_TableNameSchemaMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = TableSchema.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - return { - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? TableSchema.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: GetTableSchemaBatchOutput_TableNameSchemaMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== undefined) { - obj.value = TableSchema.toJSON(message.value); - } - return obj; - }, - - create, I>>( - base?: I, - ): GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - return GetTableSchemaBatchOutput_TableNameSchemaMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): GetTableSchemaBatchOutput_TableNameSchemaMappingEntry { - const message = createBaseGetTableSchemaBatchOutput_TableNameSchemaMappingEntry(); - message.key = object.key ?? ""; - message.value = (object.value !== undefined && object.value !== null) - ? TableSchema.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseSetupNormalizedTableInput(): SetupNormalizedTableInput { - return { peerConnectionConfig: undefined, tableIdentifier: "", sourceTableSchema: undefined }; -} - -export const SetupNormalizedTableInput = { - encode(message: SetupNormalizedTableInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - if (message.tableIdentifier !== "") { - writer.uint32(18).string(message.tableIdentifier); - } - if (message.sourceTableSchema !== undefined) { - TableSchema.encode(message.sourceTableSchema, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.tableIdentifier = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.sourceTableSchema = TableSchema.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - tableIdentifier: isSet(object.tableIdentifier) ? String(object.tableIdentifier) : "", - sourceTableSchema: isSet(object.sourceTableSchema) ? 
TableSchema.fromJSON(object.sourceTableSchema) : undefined, - }; - }, - - toJSON(message: SetupNormalizedTableInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.tableIdentifier !== "") { - obj.tableIdentifier = message.tableIdentifier; - } - if (message.sourceTableSchema !== undefined) { - obj.sourceTableSchema = TableSchema.toJSON(message.sourceTableSchema); - } - return obj; - }, - - create, I>>(base?: I): SetupNormalizedTableInput { - return SetupNormalizedTableInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SetupNormalizedTableInput { - const message = createBaseSetupNormalizedTableInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.tableIdentifier = object.tableIdentifier ?? ""; - message.sourceTableSchema = (object.sourceTableSchema !== undefined && object.sourceTableSchema !== null) - ? TableSchema.fromPartial(object.sourceTableSchema) - : undefined; - return message; - }, -}; - -function createBaseSetupNormalizedTableBatchInput(): SetupNormalizedTableBatchInput { - return { - peerConnectionConfig: undefined, - tableNameSchemaMapping: {}, - softDeleteColName: "", - syncedAtColName: "", - flowName: "", - }; -} - -export const SetupNormalizedTableBatchInput = { - encode(message: SetupNormalizedTableBatchInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerConnectionConfig !== undefined) { - Peer.encode(message.peerConnectionConfig, writer.uint32(10).fork()).ldelim(); - } - Object.entries(message.tableNameSchemaMapping).forEach(([key, value]) => { - SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry.encode( - { key: key as any, value }, - writer.uint32(18).fork(), - ).ldelim(); - }); - if (message.softDeleteColName !== "") { - writer.uint32(34).string(message.softDeleteColName); - } - if (message.syncedAtColName !== "") { - writer.uint32(42).string(message.syncedAtColName); - } - if (message.flowName !== "") { - writer.uint32(50).string(message.flowName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableBatchInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableBatchInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerConnectionConfig = Peer.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - const entry2 = SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry.decode(reader, reader.uint32()); - if (entry2.value !== undefined) { - message.tableNameSchemaMapping[entry2.key] = entry2.value; - } - continue; - case 4: - if (tag !== 34) { - break; - } - - message.softDeleteColName = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.syncedAtColName = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.flowName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableBatchInput { - return { - peerConnectionConfig: isSet(object.peerConnectionConfig) ? Peer.fromJSON(object.peerConnectionConfig) : undefined, - tableNameSchemaMapping: isObject(object.tableNameSchemaMapping) - ? Object.entries(object.tableNameSchemaMapping).reduce<{ [key: string]: TableSchema }>((acc, [key, value]) => { - acc[key] = TableSchema.fromJSON(value); - return acc; - }, {}) - : {}, - softDeleteColName: isSet(object.softDeleteColName) ? String(object.softDeleteColName) : "", - syncedAtColName: isSet(object.syncedAtColName) ? String(object.syncedAtColName) : "", - flowName: isSet(object.flowName) ? String(object.flowName) : "", - }; - }, - - toJSON(message: SetupNormalizedTableBatchInput): unknown { - const obj: any = {}; - if (message.peerConnectionConfig !== undefined) { - obj.peerConnectionConfig = Peer.toJSON(message.peerConnectionConfig); - } - if (message.tableNameSchemaMapping) { - const entries = Object.entries(message.tableNameSchemaMapping); - if (entries.length > 0) { - obj.tableNameSchemaMapping = {}; - entries.forEach(([k, v]) => { - obj.tableNameSchemaMapping[k] = TableSchema.toJSON(v); - }); - } - } - if (message.softDeleteColName !== "") { - obj.softDeleteColName = message.softDeleteColName; - } - if (message.syncedAtColName !== "") { - obj.syncedAtColName = message.syncedAtColName; - } - if (message.flowName !== "") { - obj.flowName = message.flowName; - } - return obj; - }, - - create, I>>(base?: I): SetupNormalizedTableBatchInput { - return SetupNormalizedTableBatchInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): SetupNormalizedTableBatchInput { - const message = createBaseSetupNormalizedTableBatchInput(); - message.peerConnectionConfig = (object.peerConnectionConfig !== undefined && object.peerConnectionConfig !== null) - ? Peer.fromPartial(object.peerConnectionConfig) - : undefined; - message.tableNameSchemaMapping = Object.entries(object.tableNameSchemaMapping ?? {}).reduce< - { [key: string]: TableSchema } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = TableSchema.fromPartial(value); - } - return acc; - }, {}); - message.softDeleteColName = object.softDeleteColName ?? ""; - message.syncedAtColName = object.syncedAtColName ?? ""; - message.flowName = object.flowName ?? 
""; - return message; - }, -}; - -function createBaseSetupNormalizedTableBatchInput_TableNameSchemaMappingEntry(): SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - return { key: "", value: undefined }; -} - -export const SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry = { - encode( - message: SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== undefined) { - TableSchema.encode(message.value, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableBatchInput_TableNameSchemaMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = TableSchema.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - return { - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? TableSchema.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== undefined) { - obj.value = TableSchema.toJSON(message.value); - } - return obj; - }, - - create, I>>( - base?: I, - ): SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - return SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): SetupNormalizedTableBatchInput_TableNameSchemaMappingEntry { - const message = createBaseSetupNormalizedTableBatchInput_TableNameSchemaMappingEntry(); - message.key = object.key ?? ""; - message.value = (object.value !== undefined && object.value !== null) - ? TableSchema.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseSetupNormalizedTableOutput(): SetupNormalizedTableOutput { - return { tableIdentifier: "", alreadyExists: false }; -} - -export const SetupNormalizedTableOutput = { - encode(message: SetupNormalizedTableOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.tableIdentifier !== "") { - writer.uint32(10).string(message.tableIdentifier); - } - if (message.alreadyExists === true) { - writer.uint32(16).bool(message.alreadyExists); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tableIdentifier = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.alreadyExists = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableOutput { - return { - tableIdentifier: isSet(object.tableIdentifier) ? String(object.tableIdentifier) : "", - alreadyExists: isSet(object.alreadyExists) ? Boolean(object.alreadyExists) : false, - }; - }, - - toJSON(message: SetupNormalizedTableOutput): unknown { - const obj: any = {}; - if (message.tableIdentifier !== "") { - obj.tableIdentifier = message.tableIdentifier; - } - if (message.alreadyExists === true) { - obj.alreadyExists = message.alreadyExists; - } - return obj; - }, - - create, I>>(base?: I): SetupNormalizedTableOutput { - return SetupNormalizedTableOutput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SetupNormalizedTableOutput { - const message = createBaseSetupNormalizedTableOutput(); - message.tableIdentifier = object.tableIdentifier ?? ""; - message.alreadyExists = object.alreadyExists ?? false; - return message; - }, -}; - -function createBaseSetupNormalizedTableBatchOutput(): SetupNormalizedTableBatchOutput { - return { tableExistsMapping: {} }; -} - -export const SetupNormalizedTableBatchOutput = { - encode(message: SetupNormalizedTableBatchOutput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - Object.entries(message.tableExistsMapping).forEach(([key, value]) => { - SetupNormalizedTableBatchOutput_TableExistsMappingEntry.encode( - { key: key as any, value }, - writer.uint32(10).fork(), - ).ldelim(); - }); - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableBatchOutput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableBatchOutput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - const entry1 = SetupNormalizedTableBatchOutput_TableExistsMappingEntry.decode(reader, reader.uint32()); - if (entry1.value !== undefined) { - message.tableExistsMapping[entry1.key] = entry1.value; - } - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableBatchOutput { - return { - tableExistsMapping: isObject(object.tableExistsMapping) - ? Object.entries(object.tableExistsMapping).reduce<{ [key: string]: boolean }>((acc, [key, value]) => { - acc[key] = Boolean(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: SetupNormalizedTableBatchOutput): unknown { - const obj: any = {}; - if (message.tableExistsMapping) { - const entries = Object.entries(message.tableExistsMapping); - if (entries.length > 0) { - obj.tableExistsMapping = {}; - entries.forEach(([k, v]) => { - obj.tableExistsMapping[k] = v; - }); - } - } - return obj; - }, - - create, I>>(base?: I): SetupNormalizedTableBatchOutput { - return SetupNormalizedTableBatchOutput.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>( - object: I, - ): SetupNormalizedTableBatchOutput { - const message = createBaseSetupNormalizedTableBatchOutput(); - message.tableExistsMapping = Object.entries(object.tableExistsMapping ?? {}).reduce<{ [key: string]: boolean }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = Boolean(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseSetupNormalizedTableBatchOutput_TableExistsMappingEntry(): SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - return { key: "", value: false }; -} - -export const SetupNormalizedTableBatchOutput_TableExistsMappingEntry = { - encode( - message: SetupNormalizedTableBatchOutput_TableExistsMappingEntry, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value === true) { - writer.uint32(16).bool(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSetupNormalizedTableBatchOutput_TableExistsMappingEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.value = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - return { - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? Boolean(object.value) : false, - }; - }, - - toJSON(message: SetupNormalizedTableBatchOutput_TableExistsMappingEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value === true) { - obj.value = message.value; - } - return obj; - }, - - create, I>>( - base?: I, - ): SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - return SetupNormalizedTableBatchOutput_TableExistsMappingEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): SetupNormalizedTableBatchOutput_TableExistsMappingEntry { - const message = createBaseSetupNormalizedTableBatchOutput_TableExistsMappingEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? false; - return message; - }, -}; - -function createBaseIntPartitionRange(): IntPartitionRange { - return { start: 0, end: 0 }; -} - -export const IntPartitionRange = { - encode(message: IntPartitionRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== 0) { - writer.uint32(8).int64(message.start); - } - if (message.end !== 0) { - writer.uint32(16).int64(message.end); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): IntPartitionRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseIntPartitionRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.start = longToNumber(reader.int64() as Long); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.end = longToNumber(reader.int64() as Long); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): IntPartitionRange { - return { start: isSet(object.start) ? Number(object.start) : 0, end: isSet(object.end) ? Number(object.end) : 0 }; - }, - - toJSON(message: IntPartitionRange): unknown { - const obj: any = {}; - if (message.start !== 0) { - obj.start = Math.round(message.start); - } - if (message.end !== 0) { - obj.end = Math.round(message.end); - } - return obj; - }, - - create, I>>(base?: I): IntPartitionRange { - return IntPartitionRange.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): IntPartitionRange { - const message = createBaseIntPartitionRange(); - message.start = object.start ?? 0; - message.end = object.end ?? 0; - return message; - }, -}; - -function createBaseTimestampPartitionRange(): TimestampPartitionRange { - return { start: undefined, end: undefined }; -} - -export const TimestampPartitionRange = { - encode(message: TimestampPartitionRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== undefined) { - Timestamp.encode(toTimestamp(message.start), writer.uint32(10).fork()).ldelim(); - } - if (message.end !== undefined) { - Timestamp.encode(toTimestamp(message.end), writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TimestampPartitionRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTimestampPartitionRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.start = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.end = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TimestampPartitionRange { - return { - start: isSet(object.start) ? fromJsonTimestamp(object.start) : undefined, - end: isSet(object.end) ? fromJsonTimestamp(object.end) : undefined, - }; - }, - - toJSON(message: TimestampPartitionRange): unknown { - const obj: any = {}; - if (message.start !== undefined) { - obj.start = message.start.toISOString(); - } - if (message.end !== undefined) { - obj.end = message.end.toISOString(); - } - return obj; - }, - - create, I>>(base?: I): TimestampPartitionRange { - return TimestampPartitionRange.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TimestampPartitionRange { - const message = createBaseTimestampPartitionRange(); - message.start = object.start ?? undefined; - message.end = object.end ?? 
undefined; - return message; - }, -}; - -function createBaseTID(): TID { - return { blockNumber: 0, offsetNumber: 0 }; -} - -export const TID = { - encode(message: TID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.blockNumber !== 0) { - writer.uint32(8).uint32(message.blockNumber); - } - if (message.offsetNumber !== 0) { - writer.uint32(16).uint32(message.offsetNumber); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TID { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.blockNumber = reader.uint32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.offsetNumber = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TID { - return { - blockNumber: isSet(object.blockNumber) ? Number(object.blockNumber) : 0, - offsetNumber: isSet(object.offsetNumber) ? Number(object.offsetNumber) : 0, - }; - }, - - toJSON(message: TID): unknown { - const obj: any = {}; - if (message.blockNumber !== 0) { - obj.blockNumber = Math.round(message.blockNumber); - } - if (message.offsetNumber !== 0) { - obj.offsetNumber = Math.round(message.offsetNumber); - } - return obj; - }, - - create, I>>(base?: I): TID { - return TID.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TID { - const message = createBaseTID(); - message.blockNumber = object.blockNumber ?? 0; - message.offsetNumber = object.offsetNumber ?? 0; - return message; - }, -}; - -function createBaseTIDPartitionRange(): TIDPartitionRange { - return { start: undefined, end: undefined }; -} - -export const TIDPartitionRange = { - encode(message: TIDPartitionRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== undefined) { - TID.encode(message.start, writer.uint32(10).fork()).ldelim(); - } - if (message.end !== undefined) { - TID.encode(message.end, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TIDPartitionRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTIDPartitionRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.start = TID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.end = TID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TIDPartitionRange { - return { - start: isSet(object.start) ? TID.fromJSON(object.start) : undefined, - end: isSet(object.end) ? TID.fromJSON(object.end) : undefined, - }; - }, - - toJSON(message: TIDPartitionRange): unknown { - const obj: any = {}; - if (message.start !== undefined) { - obj.start = TID.toJSON(message.start); - } - if (message.end !== undefined) { - obj.end = TID.toJSON(message.end); - } - return obj; - }, - - create, I>>(base?: I): TIDPartitionRange { - return TIDPartitionRange.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): TIDPartitionRange { - const message = createBaseTIDPartitionRange(); - message.start = (object.start !== undefined && object.start !== null) ? TID.fromPartial(object.start) : undefined; - message.end = (object.end !== undefined && object.end !== null) ? TID.fromPartial(object.end) : undefined; - return message; - }, -}; - -function createBasePartitionRange(): PartitionRange { - return { intRange: undefined, timestampRange: undefined, tidRange: undefined }; -} - -export const PartitionRange = { - encode(message: PartitionRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.intRange !== undefined) { - IntPartitionRange.encode(message.intRange, writer.uint32(10).fork()).ldelim(); - } - if (message.timestampRange !== undefined) { - TimestampPartitionRange.encode(message.timestampRange, writer.uint32(18).fork()).ldelim(); - } - if (message.tidRange !== undefined) { - TIDPartitionRange.encode(message.tidRange, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PartitionRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePartitionRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.intRange = IntPartitionRange.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.timestampRange = TimestampPartitionRange.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.tidRange = TIDPartitionRange.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PartitionRange { - return { - intRange: isSet(object.intRange) ? IntPartitionRange.fromJSON(object.intRange) : undefined, - timestampRange: isSet(object.timestampRange) - ? TimestampPartitionRange.fromJSON(object.timestampRange) - : undefined, - tidRange: isSet(object.tidRange) ? TIDPartitionRange.fromJSON(object.tidRange) : undefined, - }; - }, - - toJSON(message: PartitionRange): unknown { - const obj: any = {}; - if (message.intRange !== undefined) { - obj.intRange = IntPartitionRange.toJSON(message.intRange); - } - if (message.timestampRange !== undefined) { - obj.timestampRange = TimestampPartitionRange.toJSON(message.timestampRange); - } - if (message.tidRange !== undefined) { - obj.tidRange = TIDPartitionRange.toJSON(message.tidRange); - } - return obj; - }, - - create, I>>(base?: I): PartitionRange { - return PartitionRange.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PartitionRange { - const message = createBasePartitionRange(); - message.intRange = (object.intRange !== undefined && object.intRange !== null) - ? IntPartitionRange.fromPartial(object.intRange) - : undefined; - message.timestampRange = (object.timestampRange !== undefined && object.timestampRange !== null) - ? TimestampPartitionRange.fromPartial(object.timestampRange) - : undefined; - message.tidRange = (object.tidRange !== undefined && object.tidRange !== null) - ? 
TIDPartitionRange.fromPartial(object.tidRange) - : undefined; - return message; - }, -}; - -function createBaseQRepWriteMode(): QRepWriteMode { - return { writeType: 0, upsertKeyColumns: [] }; -} - -export const QRepWriteMode = { - encode(message: QRepWriteMode, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.writeType !== 0) { - writer.uint32(8).int32(message.writeType); - } - for (const v of message.upsertKeyColumns) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepWriteMode { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepWriteMode(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.writeType = reader.int32() as any; - continue; - case 2: - if (tag !== 18) { - break; - } - - message.upsertKeyColumns.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepWriteMode { - return { - writeType: isSet(object.writeType) ? qRepWriteTypeFromJSON(object.writeType) : 0, - upsertKeyColumns: Array.isArray(object?.upsertKeyColumns) - ? object.upsertKeyColumns.map((e: any) => String(e)) - : [], - }; - }, - - toJSON(message: QRepWriteMode): unknown { - const obj: any = {}; - if (message.writeType !== 0) { - obj.writeType = qRepWriteTypeToJSON(message.writeType); - } - if (message.upsertKeyColumns?.length) { - obj.upsertKeyColumns = message.upsertKeyColumns; - } - return obj; - }, - - create, I>>(base?: I): QRepWriteMode { - return QRepWriteMode.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepWriteMode { - const message = createBaseQRepWriteMode(); - message.writeType = object.writeType ?? 
0; - message.upsertKeyColumns = object.upsertKeyColumns?.map((e) => e) || []; - return message; - }, -}; - -function createBaseQRepConfig(): QRepConfig { - return { - flowJobName: "", - sourcePeer: undefined, - destinationPeer: undefined, - destinationTableIdentifier: "", - query: "", - watermarkTable: "", - watermarkColumn: "", - initialCopyOnly: false, - syncMode: 0, - batchSizeInt: 0, - batchDurationSeconds: 0, - maxParallelWorkers: 0, - waitBetweenBatchesSeconds: 0, - writeMode: undefined, - stagingPath: "", - numRowsPerPartition: 0, - setupWatermarkTableOnDestination: false, - dstTableFullResync: false, - syncedAtColName: "", - softDeleteColName: "", - }; -} - -export const QRepConfig = { - encode(message: QRepConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.flowJobName !== "") { - writer.uint32(10).string(message.flowJobName); - } - if (message.sourcePeer !== undefined) { - Peer.encode(message.sourcePeer, writer.uint32(18).fork()).ldelim(); - } - if (message.destinationPeer !== undefined) { - Peer.encode(message.destinationPeer, writer.uint32(26).fork()).ldelim(); - } - if (message.destinationTableIdentifier !== "") { - writer.uint32(34).string(message.destinationTableIdentifier); - } - if (message.query !== "") { - writer.uint32(42).string(message.query); - } - if (message.watermarkTable !== "") { - writer.uint32(50).string(message.watermarkTable); - } - if (message.watermarkColumn !== "") { - writer.uint32(58).string(message.watermarkColumn); - } - if (message.initialCopyOnly === true) { - writer.uint32(64).bool(message.initialCopyOnly); - } - if (message.syncMode !== 0) { - writer.uint32(72).int32(message.syncMode); - } - if (message.batchSizeInt !== 0) { - writer.uint32(80).uint32(message.batchSizeInt); - } - if (message.batchDurationSeconds !== 0) { - writer.uint32(88).uint32(message.batchDurationSeconds); - } - if (message.maxParallelWorkers !== 0) { - writer.uint32(96).uint32(message.maxParallelWorkers); - } - if (message.waitBetweenBatchesSeconds !== 0) { - writer.uint32(104).uint32(message.waitBetweenBatchesSeconds); - } - if (message.writeMode !== undefined) { - QRepWriteMode.encode(message.writeMode, writer.uint32(114).fork()).ldelim(); - } - if (message.stagingPath !== "") { - writer.uint32(122).string(message.stagingPath); - } - if (message.numRowsPerPartition !== 0) { - writer.uint32(128).uint32(message.numRowsPerPartition); - } - if (message.setupWatermarkTableOnDestination === true) { - writer.uint32(136).bool(message.setupWatermarkTableOnDestination); - } - if (message.dstTableFullResync === true) { - writer.uint32(144).bool(message.dstTableFullResync); - } - if (message.syncedAtColName !== "") { - writer.uint32(154).string(message.syncedAtColName); - } - if (message.softDeleteColName !== "") { - writer.uint32(162).string(message.softDeleteColName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQRepConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.flowJobName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.sourcePeer = Peer.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.destinationPeer = Peer.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.destinationTableIdentifier = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.query = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.watermarkTable = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.watermarkColumn = reader.string(); - continue; - case 8: - if (tag !== 64) { - break; - } - - message.initialCopyOnly = reader.bool(); - continue; - case 9: - if (tag !== 72) { - break; - } - - message.syncMode = reader.int32() as any; - continue; - case 10: - if (tag !== 80) { - break; - } - - message.batchSizeInt = reader.uint32(); - continue; - case 11: - if (tag !== 88) { - break; - } - - message.batchDurationSeconds = reader.uint32(); - continue; - case 12: - if (tag !== 96) { - break; - } - - message.maxParallelWorkers = reader.uint32(); - continue; - case 13: - if (tag !== 104) { - break; - } - - message.waitBetweenBatchesSeconds = reader.uint32(); - continue; - case 14: - if (tag !== 114) { - break; - } - - message.writeMode = QRepWriteMode.decode(reader, reader.uint32()); - continue; - case 15: - if (tag !== 122) { - break; - } - - message.stagingPath = reader.string(); - continue; - case 16: - if (tag !== 128) { - break; - } - - message.numRowsPerPartition = reader.uint32(); - continue; - case 17: - if (tag !== 136) { - break; - } - - message.setupWatermarkTableOnDestination = reader.bool(); - continue; - case 18: - if (tag !== 144) { - break; - } - - message.dstTableFullResync = reader.bool(); - continue; - case 19: - if (tag !== 154) { - break; - } - - message.syncedAtColName = reader.string(); - continue; - case 20: - if (tag !== 162) { - break; - } - - message.softDeleteColName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepConfig { - return { - flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "", - sourcePeer: isSet(object.sourcePeer) ? Peer.fromJSON(object.sourcePeer) : undefined, - destinationPeer: isSet(object.destinationPeer) ? Peer.fromJSON(object.destinationPeer) : undefined, - destinationTableIdentifier: isSet(object.destinationTableIdentifier) - ? String(object.destinationTableIdentifier) - : "", - query: isSet(object.query) ? String(object.query) : "", - watermarkTable: isSet(object.watermarkTable) ? String(object.watermarkTable) : "", - watermarkColumn: isSet(object.watermarkColumn) ? String(object.watermarkColumn) : "", - initialCopyOnly: isSet(object.initialCopyOnly) ? Boolean(object.initialCopyOnly) : false, - syncMode: isSet(object.syncMode) ? qRepSyncModeFromJSON(object.syncMode) : 0, - batchSizeInt: isSet(object.batchSizeInt) ? Number(object.batchSizeInt) : 0, - batchDurationSeconds: isSet(object.batchDurationSeconds) ? Number(object.batchDurationSeconds) : 0, - maxParallelWorkers: isSet(object.maxParallelWorkers) ? 
Number(object.maxParallelWorkers) : 0, - waitBetweenBatchesSeconds: isSet(object.waitBetweenBatchesSeconds) ? Number(object.waitBetweenBatchesSeconds) : 0, - writeMode: isSet(object.writeMode) ? QRepWriteMode.fromJSON(object.writeMode) : undefined, - stagingPath: isSet(object.stagingPath) ? String(object.stagingPath) : "", - numRowsPerPartition: isSet(object.numRowsPerPartition) ? Number(object.numRowsPerPartition) : 0, - setupWatermarkTableOnDestination: isSet(object.setupWatermarkTableOnDestination) - ? Boolean(object.setupWatermarkTableOnDestination) - : false, - dstTableFullResync: isSet(object.dstTableFullResync) ? Boolean(object.dstTableFullResync) : false, - syncedAtColName: isSet(object.syncedAtColName) ? String(object.syncedAtColName) : "", - softDeleteColName: isSet(object.softDeleteColName) ? String(object.softDeleteColName) : "", - }; - }, - - toJSON(message: QRepConfig): unknown { - const obj: any = {}; - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - if (message.sourcePeer !== undefined) { - obj.sourcePeer = Peer.toJSON(message.sourcePeer); - } - if (message.destinationPeer !== undefined) { - obj.destinationPeer = Peer.toJSON(message.destinationPeer); - } - if (message.destinationTableIdentifier !== "") { - obj.destinationTableIdentifier = message.destinationTableIdentifier; - } - if (message.query !== "") { - obj.query = message.query; - } - if (message.watermarkTable !== "") { - obj.watermarkTable = message.watermarkTable; - } - if (message.watermarkColumn !== "") { - obj.watermarkColumn = message.watermarkColumn; - } - if (message.initialCopyOnly === true) { - obj.initialCopyOnly = message.initialCopyOnly; - } - if (message.syncMode !== 0) { - obj.syncMode = qRepSyncModeToJSON(message.syncMode); - } - if (message.batchSizeInt !== 0) { - obj.batchSizeInt = Math.round(message.batchSizeInt); - } - if (message.batchDurationSeconds !== 0) { - obj.batchDurationSeconds = Math.round(message.batchDurationSeconds); - } - if (message.maxParallelWorkers !== 0) { - obj.maxParallelWorkers = Math.round(message.maxParallelWorkers); - } - if (message.waitBetweenBatchesSeconds !== 0) { - obj.waitBetweenBatchesSeconds = Math.round(message.waitBetweenBatchesSeconds); - } - if (message.writeMode !== undefined) { - obj.writeMode = QRepWriteMode.toJSON(message.writeMode); - } - if (message.stagingPath !== "") { - obj.stagingPath = message.stagingPath; - } - if (message.numRowsPerPartition !== 0) { - obj.numRowsPerPartition = Math.round(message.numRowsPerPartition); - } - if (message.setupWatermarkTableOnDestination === true) { - obj.setupWatermarkTableOnDestination = message.setupWatermarkTableOnDestination; - } - if (message.dstTableFullResync === true) { - obj.dstTableFullResync = message.dstTableFullResync; - } - if (message.syncedAtColName !== "") { - obj.syncedAtColName = message.syncedAtColName; - } - if (message.softDeleteColName !== "") { - obj.softDeleteColName = message.softDeleteColName; - } - return obj; - }, - - create, I>>(base?: I): QRepConfig { - return QRepConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepConfig { - const message = createBaseQRepConfig(); - message.flowJobName = object.flowJobName ?? ""; - message.sourcePeer = (object.sourcePeer !== undefined && object.sourcePeer !== null) - ? Peer.fromPartial(object.sourcePeer) - : undefined; - message.destinationPeer = (object.destinationPeer !== undefined && object.destinationPeer !== null) - ? 
Peer.fromPartial(object.destinationPeer) - : undefined; - message.destinationTableIdentifier = object.destinationTableIdentifier ?? ""; - message.query = object.query ?? ""; - message.watermarkTable = object.watermarkTable ?? ""; - message.watermarkColumn = object.watermarkColumn ?? ""; - message.initialCopyOnly = object.initialCopyOnly ?? false; - message.syncMode = object.syncMode ?? 0; - message.batchSizeInt = object.batchSizeInt ?? 0; - message.batchDurationSeconds = object.batchDurationSeconds ?? 0; - message.maxParallelWorkers = object.maxParallelWorkers ?? 0; - message.waitBetweenBatchesSeconds = object.waitBetweenBatchesSeconds ?? 0; - message.writeMode = (object.writeMode !== undefined && object.writeMode !== null) - ? QRepWriteMode.fromPartial(object.writeMode) - : undefined; - message.stagingPath = object.stagingPath ?? ""; - message.numRowsPerPartition = object.numRowsPerPartition ?? 0; - message.setupWatermarkTableOnDestination = object.setupWatermarkTableOnDestination ?? false; - message.dstTableFullResync = object.dstTableFullResync ?? false; - message.syncedAtColName = object.syncedAtColName ?? ""; - message.softDeleteColName = object.softDeleteColName ?? ""; - return message; - }, -}; - -function createBaseQRepPartition(): QRepPartition { - return { partitionId: "", range: undefined, fullTablePartition: false }; -} - -export const QRepPartition = { - encode(message: QRepPartition, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.partitionId !== "") { - writer.uint32(18).string(message.partitionId); - } - if (message.range !== undefined) { - PartitionRange.encode(message.range, writer.uint32(26).fork()).ldelim(); - } - if (message.fullTablePartition === true) { - writer.uint32(32).bool(message.fullTablePartition); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepPartition { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepPartition(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - if (tag !== 18) { - break; - } - - message.partitionId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.range = PartitionRange.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.fullTablePartition = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepPartition { - return { - partitionId: isSet(object.partitionId) ? String(object.partitionId) : "", - range: isSet(object.range) ? PartitionRange.fromJSON(object.range) : undefined, - fullTablePartition: isSet(object.fullTablePartition) ? Boolean(object.fullTablePartition) : false, - }; - }, - - toJSON(message: QRepPartition): unknown { - const obj: any = {}; - if (message.partitionId !== "") { - obj.partitionId = message.partitionId; - } - if (message.range !== undefined) { - obj.range = PartitionRange.toJSON(message.range); - } - if (message.fullTablePartition === true) { - obj.fullTablePartition = message.fullTablePartition; - } - return obj; - }, - - create, I>>(base?: I): QRepPartition { - return QRepPartition.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepPartition { - const message = createBaseQRepPartition(); - message.partitionId = object.partitionId ?? 
""; - message.range = (object.range !== undefined && object.range !== null) - ? PartitionRange.fromPartial(object.range) - : undefined; - message.fullTablePartition = object.fullTablePartition ?? false; - return message; - }, -}; - -function createBaseQRepPartitionBatch(): QRepPartitionBatch { - return { batchId: 0, partitions: [] }; -} - -export const QRepPartitionBatch = { - encode(message: QRepPartitionBatch, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.batchId !== 0) { - writer.uint32(8).int32(message.batchId); - } - for (const v of message.partitions) { - QRepPartition.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepPartitionBatch { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepPartitionBatch(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.batchId = reader.int32(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.partitions.push(QRepPartition.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepPartitionBatch { - return { - batchId: isSet(object.batchId) ? Number(object.batchId) : 0, - partitions: Array.isArray(object?.partitions) ? object.partitions.map((e: any) => QRepPartition.fromJSON(e)) : [], - }; - }, - - toJSON(message: QRepPartitionBatch): unknown { - const obj: any = {}; - if (message.batchId !== 0) { - obj.batchId = Math.round(message.batchId); - } - if (message.partitions?.length) { - obj.partitions = message.partitions.map((e) => QRepPartition.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): QRepPartitionBatch { - return QRepPartitionBatch.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepPartitionBatch { - const message = createBaseQRepPartitionBatch(); - message.batchId = object.batchId ?? 0; - message.partitions = object.partitions?.map((e) => QRepPartition.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseQRepParitionResult(): QRepParitionResult { - return { partitions: [] }; -} - -export const QRepParitionResult = { - encode(message: QRepParitionResult, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.partitions) { - QRepPartition.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepParitionResult { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepParitionResult(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.partitions.push(QRepPartition.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepParitionResult { - return { - partitions: Array.isArray(object?.partitions) ? 
object.partitions.map((e: any) => QRepPartition.fromJSON(e)) : [], - }; - }, - - toJSON(message: QRepParitionResult): unknown { - const obj: any = {}; - if (message.partitions?.length) { - obj.partitions = message.partitions.map((e) => QRepPartition.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): QRepParitionResult { - return QRepParitionResult.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepParitionResult { - const message = createBaseQRepParitionResult(); - message.partitions = object.partitions?.map((e) => QRepPartition.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseDropFlowInput(): DropFlowInput { - return { flowName: "" }; -} - -export const DropFlowInput = { - encode(message: DropFlowInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.flowName !== "") { - writer.uint32(10).string(message.flowName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DropFlowInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDropFlowInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.flowName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DropFlowInput { - return { flowName: isSet(object.flowName) ? String(object.flowName) : "" }; - }, - - toJSON(message: DropFlowInput): unknown { - const obj: any = {}; - if (message.flowName !== "") { - obj.flowName = message.flowName; - } - return obj; - }, - - create, I>>(base?: I): DropFlowInput { - return DropFlowInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): DropFlowInput { - const message = createBaseDropFlowInput(); - message.flowName = object.flowName ?? ""; - return message; - }, -}; - -function createBaseDeltaAddedColumn(): DeltaAddedColumn { - return { columnName: "", columnType: "" }; -} - -export const DeltaAddedColumn = { - encode(message: DeltaAddedColumn, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.columnName !== "") { - writer.uint32(10).string(message.columnName); - } - if (message.columnType !== "") { - writer.uint32(18).string(message.columnType); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeltaAddedColumn { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeltaAddedColumn(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.columnName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.columnType = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeltaAddedColumn { - return { - columnName: isSet(object.columnName) ? String(object.columnName) : "", - columnType: isSet(object.columnType) ? 
String(object.columnType) : "", - }; - }, - - toJSON(message: DeltaAddedColumn): unknown { - const obj: any = {}; - if (message.columnName !== "") { - obj.columnName = message.columnName; - } - if (message.columnType !== "") { - obj.columnType = message.columnType; - } - return obj; - }, - - create, I>>(base?: I): DeltaAddedColumn { - return DeltaAddedColumn.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): DeltaAddedColumn { - const message = createBaseDeltaAddedColumn(); - message.columnName = object.columnName ?? ""; - message.columnType = object.columnType ?? ""; - return message; - }, -}; - -function createBaseTableSchemaDelta(): TableSchemaDelta { - return { srcTableName: "", dstTableName: "", addedColumns: [] }; -} - -export const TableSchemaDelta = { - encode(message: TableSchemaDelta, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.srcTableName !== "") { - writer.uint32(10).string(message.srcTableName); - } - if (message.dstTableName !== "") { - writer.uint32(18).string(message.dstTableName); - } - for (const v of message.addedColumns) { - DeltaAddedColumn.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableSchemaDelta { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableSchemaDelta(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.srcTableName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.dstTableName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.addedColumns.push(DeltaAddedColumn.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableSchemaDelta { - return { - srcTableName: isSet(object.srcTableName) ? String(object.srcTableName) : "", - dstTableName: isSet(object.dstTableName) ? String(object.dstTableName) : "", - addedColumns: Array.isArray(object?.addedColumns) - ? object.addedColumns.map((e: any) => DeltaAddedColumn.fromJSON(e)) - : [], - }; - }, - - toJSON(message: TableSchemaDelta): unknown { - const obj: any = {}; - if (message.srcTableName !== "") { - obj.srcTableName = message.srcTableName; - } - if (message.dstTableName !== "") { - obj.dstTableName = message.dstTableName; - } - if (message.addedColumns?.length) { - obj.addedColumns = message.addedColumns.map((e) => DeltaAddedColumn.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): TableSchemaDelta { - return TableSchemaDelta.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableSchemaDelta { - const message = createBaseTableSchemaDelta(); - message.srcTableName = object.srcTableName ?? ""; - message.dstTableName = object.dstTableName ?? 
""; - message.addedColumns = object.addedColumns?.map((e) => DeltaAddedColumn.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseReplayTableSchemaDeltaInput(): ReplayTableSchemaDeltaInput { - return { flowConnectionConfigs: undefined, tableSchemaDeltas: [] }; -} - -export const ReplayTableSchemaDeltaInput = { - encode(message: ReplayTableSchemaDeltaInput, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.flowConnectionConfigs !== undefined) { - FlowConnectionConfigs.encode(message.flowConnectionConfigs, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.tableSchemaDeltas) { - TableSchemaDelta.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ReplayTableSchemaDeltaInput { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseReplayTableSchemaDeltaInput(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.flowConnectionConfigs = FlowConnectionConfigs.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.tableSchemaDeltas.push(TableSchemaDelta.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ReplayTableSchemaDeltaInput { - return { - flowConnectionConfigs: isSet(object.flowConnectionConfigs) - ? FlowConnectionConfigs.fromJSON(object.flowConnectionConfigs) - : undefined, - tableSchemaDeltas: Array.isArray(object?.tableSchemaDeltas) - ? object.tableSchemaDeltas.map((e: any) => TableSchemaDelta.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ReplayTableSchemaDeltaInput): unknown { - const obj: any = {}; - if (message.flowConnectionConfigs !== undefined) { - obj.flowConnectionConfigs = FlowConnectionConfigs.toJSON(message.flowConnectionConfigs); - } - if (message.tableSchemaDeltas?.length) { - obj.tableSchemaDeltas = message.tableSchemaDeltas.map((e) => TableSchemaDelta.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): ReplayTableSchemaDeltaInput { - return ReplayTableSchemaDeltaInput.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ReplayTableSchemaDeltaInput { - const message = createBaseReplayTableSchemaDeltaInput(); - message.flowConnectionConfigs = - (object.flowConnectionConfigs !== undefined && object.flowConnectionConfigs !== null) - ? 
FlowConnectionConfigs.fromPartial(object.flowConnectionConfigs) - : undefined; - message.tableSchemaDeltas = object.tableSchemaDeltas?.map((e) => TableSchemaDelta.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseQRepFlowState(): QRepFlowState { - return { lastPartition: undefined, numPartitionsProcessed: 0, needsResync: false, disableWaitForNewRows: false }; -} - -export const QRepFlowState = { - encode(message: QRepFlowState, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.lastPartition !== undefined) { - QRepPartition.encode(message.lastPartition, writer.uint32(10).fork()).ldelim(); - } - if (message.numPartitionsProcessed !== 0) { - writer.uint32(16).uint64(message.numPartitionsProcessed); - } - if (message.needsResync === true) { - writer.uint32(24).bool(message.needsResync); - } - if (message.disableWaitForNewRows === true) { - writer.uint32(32).bool(message.disableWaitForNewRows); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepFlowState { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepFlowState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.lastPartition = QRepPartition.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.numPartitionsProcessed = longToNumber(reader.uint64() as Long); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.needsResync = reader.bool(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.disableWaitForNewRows = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepFlowState { - return { - lastPartition: isSet(object.lastPartition) ? QRepPartition.fromJSON(object.lastPartition) : undefined, - numPartitionsProcessed: isSet(object.numPartitionsProcessed) ? Number(object.numPartitionsProcessed) : 0, - needsResync: isSet(object.needsResync) ? Boolean(object.needsResync) : false, - disableWaitForNewRows: isSet(object.disableWaitForNewRows) ? Boolean(object.disableWaitForNewRows) : false, - }; - }, - - toJSON(message: QRepFlowState): unknown { - const obj: any = {}; - if (message.lastPartition !== undefined) { - obj.lastPartition = QRepPartition.toJSON(message.lastPartition); - } - if (message.numPartitionsProcessed !== 0) { - obj.numPartitionsProcessed = Math.round(message.numPartitionsProcessed); - } - if (message.needsResync === true) { - obj.needsResync = message.needsResync; - } - if (message.disableWaitForNewRows === true) { - obj.disableWaitForNewRows = message.disableWaitForNewRows; - } - return obj; - }, - - create, I>>(base?: I): QRepFlowState { - return QRepFlowState.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepFlowState { - const message = createBaseQRepFlowState(); - message.lastPartition = (object.lastPartition !== undefined && object.lastPartition !== null) - ? QRepPartition.fromPartial(object.lastPartition) - : undefined; - message.numPartitionsProcessed = object.numPartitionsProcessed ?? 0; - message.needsResync = object.needsResync ?? false; - message.disableWaitForNewRows = object.disableWaitForNewRows ?? 
false; - return message; - }, -}; - -function createBasePeerDBColumns(): PeerDBColumns { - return { softDeleteColName: "", syncedAtColName: "", softDelete: false }; -} - -export const PeerDBColumns = { - encode(message: PeerDBColumns, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.softDeleteColName !== "") { - writer.uint32(10).string(message.softDeleteColName); - } - if (message.syncedAtColName !== "") { - writer.uint32(18).string(message.syncedAtColName); - } - if (message.softDelete === true) { - writer.uint32(24).bool(message.softDelete); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PeerDBColumns { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePeerDBColumns(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.softDeleteColName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.syncedAtColName = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.softDelete = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PeerDBColumns { - return { - softDeleteColName: isSet(object.softDeleteColName) ? String(object.softDeleteColName) : "", - syncedAtColName: isSet(object.syncedAtColName) ? String(object.syncedAtColName) : "", - softDelete: isSet(object.softDelete) ? Boolean(object.softDelete) : false, - }; - }, - - toJSON(message: PeerDBColumns): unknown { - const obj: any = {}; - if (message.softDeleteColName !== "") { - obj.softDeleteColName = message.softDeleteColName; - } - if (message.syncedAtColName !== "") { - obj.syncedAtColName = message.syncedAtColName; - } - if (message.softDelete === true) { - obj.softDelete = message.softDelete; - } - return obj; - }, - - create, I>>(base?: I): PeerDBColumns { - return PeerDBColumns.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PeerDBColumns { - const message = createBasePeerDBColumns(); - message.softDeleteColName = object.softDeleteColName ?? ""; - message.syncedAtColName = object.syncedAtColName ?? ""; - message.softDelete = object.softDelete ?? false; - return message; - }, -}; - -function createBaseGetOpenConnectionsForUserResult(): GetOpenConnectionsForUserResult { - return { userName: "", currentOpenConnections: 0 }; -} - -export const GetOpenConnectionsForUserResult = { - encode(message: GetOpenConnectionsForUserResult, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.userName !== "") { - writer.uint32(10).string(message.userName); - } - if (message.currentOpenConnections !== 0) { - writer.uint32(16).int64(message.currentOpenConnections); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GetOpenConnectionsForUserResult { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGetOpenConnectionsForUserResult(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.userName = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.currentOpenConnections = longToNumber(reader.int64() as Long); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GetOpenConnectionsForUserResult { - return { - userName: isSet(object.userName) ? String(object.userName) : "", - currentOpenConnections: isSet(object.currentOpenConnections) ? Number(object.currentOpenConnections) : 0, - }; - }, - - toJSON(message: GetOpenConnectionsForUserResult): unknown { - const obj: any = {}; - if (message.userName !== "") { - obj.userName = message.userName; - } - if (message.currentOpenConnections !== 0) { - obj.currentOpenConnections = Math.round(message.currentOpenConnections); - } - return obj; - }, - - create, I>>(base?: I): GetOpenConnectionsForUserResult { - return GetOpenConnectionsForUserResult.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): GetOpenConnectionsForUserResult { - const message = createBaseGetOpenConnectionsForUserResult(); - message.userName = object.userName ?? ""; - message.currentOpenConnections = object.currentOpenConnections ?? 0; - return message; - }, -}; - -declare const self: any | undefined; -declare const window: any | undefined; -declare const global: any | undefined; -const tsProtoGlobalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function toTimestamp(date: Date): Timestamp { - const seconds = date.getTime() / 1_000; - const nanos = (date.getTime() % 1_000) * 1_000_000; - return { seconds, nanos }; -} - -function fromTimestamp(t: Timestamp): Date { - let millis = (t.seconds || 0) * 1_000; - millis += (t.nanos || 0) / 1_000_000; - return new Date(millis); -} - -function fromJsonTimestamp(o: any): Date { - if (o instanceof Date) { - return o; - } else if (typeof o === "string") { - return new Date(o); - } else { - return fromTimestamp(Timestamp.fromJSON(o)); - } -} - -function longToNumber(long: Long): number { - if (long.gt(Number.MAX_SAFE_INTEGER)) { - throw new tsProtoGlobalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); - } - return long.toNumber(); -} - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ui/grpc_generated/google/api/annotations.ts b/ui/grpc_generated/google/api/annotations.ts deleted file mode 100644 index c2161053d4..0000000000 --- a/ui/grpc_generated/google/api/annotations.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "google.api"; diff --git a/ui/grpc_generated/google/api/http.ts b/ui/grpc_generated/google/api/http.ts deleted file mode 100644 index 339db6c540..0000000000 --- a/ui/grpc_generated/google/api/http.ts +++ /dev/null @@ -1,745 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; - -export const protobufPackage = "google.api"; - -/** - * Defines the HTTP configuration for an API service. It contains a list of - * [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method - * to one or more HTTP REST API methods. - */ -export interface Http { - /** - * A list of HTTP configuration rules that apply to individual API methods. - * - * **NOTE:** All service configuration rules follow "last one wins" order. - */ - rules: HttpRule[]; - /** - * When set to true, URL path parameters will be fully URI-decoded except in - * cases of single segment matches in reserved expansion, where "%2F" will be - * left encoded. - * - * The default behavior is to not decode RFC 6570 reserved characters in multi - * segment matches. - */ - fullyDecodeReservedExpansion: boolean; -} - -/** - * # gRPC Transcoding - * - * gRPC Transcoding is a feature for mapping between a gRPC method and one or - * more HTTP REST endpoints. It allows developers to build a single API service - * that supports both gRPC APIs and REST APIs. Many systems, including [Google - * APIs](https://github.com/googleapis/googleapis), - * [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC - * Gateway](https://github.com/grpc-ecosystem/grpc-gateway), - * and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature - * and use it for large scale production services. - * - * `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies - * how different portions of the gRPC request message are mapped to the URL - * path, URL query parameters, and HTTP request body. It also controls how the - * gRPC response message is mapped to the HTTP response body. `HttpRule` is - * typically specified as an `google.api.http` annotation on the gRPC method. - * - * Each mapping specifies a URL path template and an HTTP method. 
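// A minimal, self-contained sketch of the runtime helpers deleted just above:
// the Date <-> google.protobuf.Timestamp conversions and the
// MAX_SAFE_INTEGER guard behind longToNumber. The TimestampLike interface and
// the helper names here are assumptions for illustration, not the generated API.
interface TimestampLike {
  seconds: number; // whole seconds since the Unix epoch
  nanos: number;   // sub-second remainder, in nanoseconds
}

function dateToTimestamp(date: Date): TimestampLike {
  const millis = date.getTime();
  return { seconds: Math.trunc(millis / 1_000), nanos: (millis % 1_000) * 1_000_000 };
}

function timestampToDate(t: TimestampLike): Date {
  return new Date((t.seconds || 0) * 1_000 + (t.nanos || 0) / 1_000_000);
}

// int64 fields are decoded as Long values; converting them to a JS number is
// only safe below Number.MAX_SAFE_INTEGER, hence the guard in longToNumber.
function checkedInt64ToNumber(value: bigint): number {
  if (value > BigInt(Number.MAX_SAFE_INTEGER)) {
    throw new Error("Value is larger than Number.MAX_SAFE_INTEGER");
  }
  return Number(value);
}

const now = new Date();
console.log(timestampToDate(dateToTimestamp(now)).getTime() === now.getTime()); // true
console.log(checkedInt64ToNumber(42n));                                         // 42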
The path - * template may refer to one or more fields in the gRPC request message, as long - * as each field is a non-repeated field with a primitive (non-message) type. - * The path template controls how fields of the request message are mapped to - * the URL path. - * - * Example: - * - * service Messaging { - * rpc GetMessage(GetMessageRequest) returns (Message) { - * option (google.api.http) = { - * get: "/v1/{name=messages/*}" - * }; - * } - * } - * message GetMessageRequest { - * string name = 1; // Mapped to URL path. - * } - * message Message { - * string text = 1; // The resource content. - * } - * - * This enables an HTTP REST to gRPC mapping as below: - * - * HTTP | gRPC - * -----|----- - * `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` - * - * Any fields in the request message which are not bound by the path template - * automatically become HTTP query parameters if there is no HTTP request body. - * For example: - * - * service Messaging { - * rpc GetMessage(GetMessageRequest) returns (Message) { - * option (google.api.http) = { - * get:"/v1/messages/{message_id}" - * }; - * } - * } - * message GetMessageRequest { - * message SubMessage { - * string subfield = 1; - * } - * string message_id = 1; // Mapped to URL path. - * int64 revision = 2; // Mapped to URL query parameter `revision`. - * SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. - * } - * - * This enables a HTTP JSON to RPC mapping as below: - * - * HTTP | gRPC - * -----|----- - * `GET /v1/messages/123456?revision=2&sub.subfield=foo` | - * `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: - * "foo"))` - * - * Note that fields which are mapped to URL query parameters must have a - * primitive type or a repeated primitive type or a non-repeated message type. - * In the case of a repeated type, the parameter can be repeated in the URL - * as `...?param=A¶m=B`. In the case of a message type, each field of the - * message is mapped to a separate parameter, such as - * `...?foo.a=A&foo.b=B&foo.c=C`. - * - * For HTTP methods that allow a request body, the `body` field - * specifies the mapping. Consider a REST update method on the - * message resource collection: - * - * service Messaging { - * rpc UpdateMessage(UpdateMessageRequest) returns (Message) { - * option (google.api.http) = { - * patch: "/v1/messages/{message_id}" - * body: "message" - * }; - * } - * } - * message UpdateMessageRequest { - * string message_id = 1; // mapped to the URL - * Message message = 2; // mapped to the body - * } - * - * The following HTTP JSON to RPC mapping is enabled, where the - * representation of the JSON in the request body is determined by - * protos JSON encoding: - * - * HTTP | gRPC - * -----|----- - * `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: - * "123456" message { text: "Hi!" })` - * - * The special name `*` can be used in the body mapping to define that - * every field not bound by the path template should be mapped to the - * request body. This enables the following alternative definition of - * the update method: - * - * service Messaging { - * rpc UpdateMessage(Message) returns (Message) { - * option (google.api.http) = { - * patch: "/v1/messages/{message_id}" - * body: "*" - * }; - * } - * } - * message Message { - * string message_id = 1; - * string text = 2; - * } - * - * The following HTTP JSON to RPC mapping is enabled: - * - * HTTP | gRPC - * -----|----- - * `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | `UpdateMessage(message_id: - * "123456" text: "Hi!")` - * - * Note that when using `*` in the body mapping, it is not possible to - * have HTTP parameters, as all fields not bound by the path end in - * the body. This makes this option more rarely used in practice when - * defining REST APIs. The common usage of `*` is in custom methods - * which don't use the URL at all for transferring data. - * - * It is possible to define multiple HTTP methods for one RPC by using - * the `additional_bindings` option. Example: - * - * service Messaging { - * rpc GetMessage(GetMessageRequest) returns (Message) { - * option (google.api.http) = { - * get: "/v1/messages/{message_id}" - * additional_bindings { - * get: "/v1/users/{user_id}/messages/{message_id}" - * } - * }; - * } - * } - * message GetMessageRequest { - * string message_id = 1; - * string user_id = 2; - * } - * - * This enables the following two alternative HTTP JSON to RPC mappings: - * - * HTTP | gRPC - * -----|----- - * `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` - * `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: - * "123456")` - * - * ## Rules for HTTP mapping - * - * 1. Leaf request fields (recursive expansion nested messages in the request - * message) are classified into three categories: - * - Fields referred by the path template. They are passed via the URL path. - * - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They - * are passed via the HTTP - * request body. - * - All other fields are passed via the URL query parameters, and the - * parameter name is the field path in the request message. A repeated - * field can be represented as multiple query parameters under the same - * name. - * 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL - * query parameter, all fields - * are passed via URL path and HTTP request body. - * 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP - * request body, all - * fields are passed via URL path and URL query parameters. - * - * ### Path template syntax - * - * Template = "/" Segments [ Verb ] ; - * Segments = Segment { "/" Segment } ; - * Segment = "*" | "**" | LITERAL | Variable ; - * Variable = "{" FieldPath [ "=" Segments ] "}" ; - * FieldPath = IDENT { "." IDENT } ; - * Verb = ":" LITERAL ; - * - * The syntax `*` matches a single URL path segment. The syntax `**` matches - * zero or more URL path segments, which must be the last part of the URL path - * except the `Verb`. - * - * The syntax `Variable` matches part of the URL path as specified by its - * template. A variable template must not contain other variables. If a variable - * matches a single path segment, its template may be omitted, e.g. `{var}` - * is equivalent to `{var=*}`. - * - * The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` - * contains any reserved character, such characters should be percent-encoded - * before the matching. - * - * If a variable contains exactly one path segment, such as `"{var}"` or - * `"{var=*}"`, when such a variable is expanded into a URL path on the client - * side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The - * server side does the reverse decoding. Such variables show up in the - * [Discovery - * Document](https://developers.google.com/discovery/v1/reference/apis) as - * `{var}`. 
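// A toy sketch of the single-segment expansion rule described above: when a
// client expands `{var}` (equivalently `{var=*}`) into a URL path, every
// character outside [-_.~0-9a-zA-Z] is percent-encoded, so a "/" inside the
// value becomes %2F. This is only an illustration, not the transcoding
// implementation used by any of the proxies listed above.
function expandSingleSegment(template: string, vars: Record<string, string>): string {
  return template.replace(/\{(\w+)(?:=\*)?\}/g, (_match, name: string) =>
    // encodeURIComponent keeps alphanumerics and - _ . ~ (plus a few extras
    // such as ! * ' ( )), which is close enough for this sketch.
    encodeURIComponent(vars[name] ?? ""),
  );
}

console.log(expandSingleSegment("/v1/{name}", { name: "messages/123456" }));
// -> /v1/messages%2F123456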
- * - * If a variable contains multiple path segments, such as `"{var=foo/*}"` - * or `"{var=**}"`, when such a variable is expanded into a URL path on the - * client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. - * The server side does the reverse decoding, except "%2F" and "%2f" are left - * unchanged. Such variables show up in the - * [Discovery - * Document](https://developers.google.com/discovery/v1/reference/apis) as - * `{+var}`. - * - * ## Using gRPC API Service Configuration - * - * gRPC API Service Configuration (service config) is a configuration language - * for configuring a gRPC service to become a user-facing product. The - * service config is simply the YAML representation of the `google.api.Service` - * proto message. - * - * As an alternative to annotating your proto file, you can configure gRPC - * transcoding in your service config YAML files. You do this by specifying a - * `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same - * effect as the proto annotation. This can be particularly useful if you - * have a proto that is reused in multiple services. Note that any transcoding - * specified in the service config will override any matching transcoding - * configuration in the proto. - * - * Example: - * - * http: - * rules: - * # Selects a gRPC method and applies HttpRule to it. - * - selector: example.v1.Messaging.GetMessage - * get: /v1/messages/{message_id}/{sub.subfield} - * - * ## Special notes - * - * When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the - * proto to JSON conversion must follow the [proto3 - * specification](https://developers.google.com/protocol-buffers/docs/proto3#json). - * - * While the single segment variable follows the semantics of - * [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String - * Expansion, the multi segment variable **does not** follow RFC 6570 Section - * 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion - * does not expand special characters like `?` and `#`, which would lead - * to invalid URLs. As the result, gRPC Transcoding uses a custom encoding - * for multi segment variables. - * - * The path variables **must not** refer to any repeated or mapped field, - * because client libraries are not capable of handling such variable expansion. - * - * The path variables **must not** capture the leading "/" character. The reason - * is that the most common use case "{var}" does not capture the leading "/" - * character. For consistency, all path variables must share the same behavior. - * - * Repeated message fields must not be mapped to URL query parameters, because - * no client library can support such complicated mapping. - * - * If an API needs to use a JSON array for request or response body, it can map - * the request or response body to a repeated field. However, some gRPC - * Transcoding implementations may not support this feature. - */ -export interface HttpRule { - /** - * Selects a method to which this rule applies. - * - * Refer to [selector][google.api.DocumentationRule.selector] for syntax - * details. - */ - selector: string; - /** - * Maps to HTTP GET. Used for listing and getting information about - * resources. - */ - get?: - | string - | undefined; - /** Maps to HTTP PUT. Used for replacing a resource. */ - put?: - | string - | undefined; - /** Maps to HTTP POST. Used for creating a resource or performing an action. */ - post?: - | string - | undefined; - /** Maps to HTTP DELETE. 
Used for deleting a resource. */ - delete?: - | string - | undefined; - /** Maps to HTTP PATCH. Used for updating a resource. */ - patch?: - | string - | undefined; - /** - * The custom pattern is used for specifying an HTTP method that is not - * included in the `pattern` field, such as HEAD, or "*" to leave the - * HTTP method unspecified for this rule. The wild-card rule is useful - * for services that provide content to Web (HTML) clients. - */ - custom?: - | CustomHttpPattern - | undefined; - /** - * The name of the request field whose value is mapped to the HTTP request - * body, or `*` for mapping all request fields not captured by the path - * pattern to the HTTP body, or omitted for not having any HTTP request body. - * - * NOTE: the referred field must be present at the top-level of the request - * message type. - */ - body: string; - /** - * Optional. The name of the response field whose value is mapped to the HTTP - * response body. When omitted, the entire response message will be used - * as the HTTP response body. - * - * NOTE: The referred field must be present at the top-level of the response - * message type. - */ - responseBody: string; - /** - * Additional HTTP bindings for the selector. Nested bindings must - * not contain an `additional_bindings` field themselves (that is, - * the nesting may only be one level deep). - */ - additionalBindings: HttpRule[]; -} - -/** A custom pattern is used for defining custom HTTP verb. */ -export interface CustomHttpPattern { - /** The name of this custom HTTP verb. */ - kind: string; - /** The path matched by this custom verb. */ - path: string; -} - -function createBaseHttp(): Http { - return { rules: [], fullyDecodeReservedExpansion: false }; -} - -export const Http = { - encode(message: Http, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.rules) { - HttpRule.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.fullyDecodeReservedExpansion === true) { - writer.uint32(16).bool(message.fullyDecodeReservedExpansion); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Http { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseHttp(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.rules.push(HttpRule.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.fullyDecodeReservedExpansion = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Http { - return { - rules: Array.isArray(object?.rules) ? object.rules.map((e: any) => HttpRule.fromJSON(e)) : [], - fullyDecodeReservedExpansion: isSet(object.fullyDecodeReservedExpansion) - ? Boolean(object.fullyDecodeReservedExpansion) - : false, - }; - }, - - toJSON(message: Http): unknown { - const obj: any = {}; - if (message.rules?.length) { - obj.rules = message.rules.map((e) => HttpRule.toJSON(e)); - } - if (message.fullyDecodeReservedExpansion === true) { - obj.fullyDecodeReservedExpansion = message.fullyDecodeReservedExpansion; - } - return obj; - }, - - create, I>>(base?: I): Http { - return Http.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): Http { - const message = createBaseHttp(); - message.rules = object.rules?.map((e) => HttpRule.fromPartial(e)) || []; - message.fullyDecodeReservedExpansion = object.fullyDecodeReservedExpansion ?? false; - return message; - }, -}; - -function createBaseHttpRule(): HttpRule { - return { - selector: "", - get: undefined, - put: undefined, - post: undefined, - delete: undefined, - patch: undefined, - custom: undefined, - body: "", - responseBody: "", - additionalBindings: [], - }; -} - -export const HttpRule = { - encode(message: HttpRule, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.selector !== "") { - writer.uint32(10).string(message.selector); - } - if (message.get !== undefined) { - writer.uint32(18).string(message.get); - } - if (message.put !== undefined) { - writer.uint32(26).string(message.put); - } - if (message.post !== undefined) { - writer.uint32(34).string(message.post); - } - if (message.delete !== undefined) { - writer.uint32(42).string(message.delete); - } - if (message.patch !== undefined) { - writer.uint32(50).string(message.patch); - } - if (message.custom !== undefined) { - CustomHttpPattern.encode(message.custom, writer.uint32(66).fork()).ldelim(); - } - if (message.body !== "") { - writer.uint32(58).string(message.body); - } - if (message.responseBody !== "") { - writer.uint32(98).string(message.responseBody); - } - for (const v of message.additionalBindings) { - HttpRule.encode(v!, writer.uint32(90).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): HttpRule { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseHttpRule(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.selector = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.get = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.put = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.post = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.delete = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.patch = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.custom = CustomHttpPattern.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.body = reader.string(); - continue; - case 12: - if (tag !== 98) { - break; - } - - message.responseBody = reader.string(); - continue; - case 11: - if (tag !== 90) { - break; - } - - message.additionalBindings.push(HttpRule.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): HttpRule { - return { - selector: isSet(object.selector) ? String(object.selector) : "", - get: isSet(object.get) ? String(object.get) : undefined, - put: isSet(object.put) ? String(object.put) : undefined, - post: isSet(object.post) ? String(object.post) : undefined, - delete: isSet(object.delete) ? String(object.delete) : undefined, - patch: isSet(object.patch) ? String(object.patch) : undefined, - custom: isSet(object.custom) ? 
CustomHttpPattern.fromJSON(object.custom) : undefined, - body: isSet(object.body) ? String(object.body) : "", - responseBody: isSet(object.responseBody) ? String(object.responseBody) : "", - additionalBindings: Array.isArray(object?.additionalBindings) - ? object.additionalBindings.map((e: any) => HttpRule.fromJSON(e)) - : [], - }; - }, - - toJSON(message: HttpRule): unknown { - const obj: any = {}; - if (message.selector !== "") { - obj.selector = message.selector; - } - if (message.get !== undefined) { - obj.get = message.get; - } - if (message.put !== undefined) { - obj.put = message.put; - } - if (message.post !== undefined) { - obj.post = message.post; - } - if (message.delete !== undefined) { - obj.delete = message.delete; - } - if (message.patch !== undefined) { - obj.patch = message.patch; - } - if (message.custom !== undefined) { - obj.custom = CustomHttpPattern.toJSON(message.custom); - } - if (message.body !== "") { - obj.body = message.body; - } - if (message.responseBody !== "") { - obj.responseBody = message.responseBody; - } - if (message.additionalBindings?.length) { - obj.additionalBindings = message.additionalBindings.map((e) => HttpRule.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): HttpRule { - return HttpRule.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): HttpRule { - const message = createBaseHttpRule(); - message.selector = object.selector ?? ""; - message.get = object.get ?? undefined; - message.put = object.put ?? undefined; - message.post = object.post ?? undefined; - message.delete = object.delete ?? undefined; - message.patch = object.patch ?? undefined; - message.custom = (object.custom !== undefined && object.custom !== null) - ? CustomHttpPattern.fromPartial(object.custom) - : undefined; - message.body = object.body ?? ""; - message.responseBody = object.responseBody ?? ""; - message.additionalBindings = object.additionalBindings?.map((e) => HttpRule.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseCustomHttpPattern(): CustomHttpPattern { - return { kind: "", path: "" }; -} - -export const CustomHttpPattern = { - encode(message: CustomHttpPattern, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.kind !== "") { - writer.uint32(10).string(message.kind); - } - if (message.path !== "") { - writer.uint32(18).string(message.path); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CustomHttpPattern { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCustomHttpPattern(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.kind = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.path = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CustomHttpPattern { - return { kind: isSet(object.kind) ? String(object.kind) : "", path: isSet(object.path) ? String(object.path) : "" }; - }, - - toJSON(message: CustomHttpPattern): unknown { - const obj: any = {}; - if (message.kind !== "") { - obj.kind = message.kind; - } - if (message.path !== "") { - obj.path = message.path; - } - return obj; - }, - - create, I>>(base?: I): CustomHttpPattern { - return CustomHttpPattern.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): CustomHttpPattern { - const message = createBaseCustomHttpPattern(); - message.kind = object.kind ?? ""; - message.path = object.path ?? ""; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ui/grpc_generated/google/protobuf/descriptor.ts b/ui/grpc_generated/google/protobuf/descriptor.ts deleted file mode 100644 index 0ebf9063ac..0000000000 --- a/ui/grpc_generated/google/protobuf/descriptor.ts +++ /dev/null @@ -1,4831 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; - -export const protobufPackage = "google.protobuf"; - -/** - * The protocol compiler can output a FileDescriptorSet containing the .proto - * files it parses. - */ -export interface FileDescriptorSet { - file: FileDescriptorProto[]; -} - -/** Describes a complete .proto file. */ -export interface FileDescriptorProto { - /** file name, relative to root of source tree */ - name: string; - /** e.g. "foo", "foo.bar", etc. */ - package: string; - /** Names of files imported by this file. */ - dependency: string[]; - /** Indexes of the public imported files in the dependency list above. */ - publicDependency: number[]; - /** - * Indexes of the weak imported files in the dependency list. - * For Google-internal migration only. Do not use. - */ - weakDependency: number[]; - /** All top-level definitions in this file. */ - messageType: DescriptorProto[]; - enumType: EnumDescriptorProto[]; - service: ServiceDescriptorProto[]; - extension: FieldDescriptorProto[]; - options: - | FileOptions - | undefined; - /** - * This field contains optional information about the original source code. - * You may safely remove this entire field without harming runtime - * functionality of the descriptors -- the information is needed only by - * development tools. - */ - sourceCodeInfo: - | SourceCodeInfo - | undefined; - /** - * The syntax of the proto file. - * The supported values are "proto2", "proto3", and "editions". - * - * If `edition` is present, this value must be "editions". - */ - syntax: string; - /** The edition of the proto file, which is an opaque string. */ - edition: string; -} - -/** Describes a message type. */ -export interface DescriptorProto { - name: string; - field: FieldDescriptorProto[]; - extension: FieldDescriptorProto[]; - nestedType: DescriptorProto[]; - enumType: EnumDescriptorProto[]; - extensionRange: DescriptorProto_ExtensionRange[]; - oneofDecl: OneofDescriptorProto[]; - options: MessageOptions | undefined; - reservedRange: DescriptorProto_ReservedRange[]; - /** - * Reserved field names, which may not be used by fields in the same message. - * A given name may only be reserved once. - */ - reservedName: string[]; -} - -export interface DescriptorProto_ExtensionRange { - /** Inclusive. */ - start: number; - /** Exclusive. */ - end: number; - options: ExtensionRangeOptions | undefined; -} - -/** - * Range of reserved tag numbers. 
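// An aside on the encode()/decode() loops that appear throughout these deleted
// files: a protobuf tag packs the field number and the wire type into a single
// varint as (fieldNumber << 3) | wireType, which is why the generated code
// switches on `tag >>> 3` and compares against literal tags such as 10, 16 and
// 18. Sketch for illustration only.
enum WireType {
  Varint = 0,
  LengthDelimited = 2,
}

function makeTag(fieldNumber: number, wireType: WireType): number {
  return (fieldNumber << 3) | wireType;
}

console.log(makeTag(1, WireType.LengthDelimited)); // 10, e.g. writer.uint32(10).string(...)
console.log(makeTag(2, WireType.Varint));          // 16, e.g. writer.uint32(16).int64(...)
console.log(makeTag(2, WireType.LengthDelimited)); // 18
console.log(10 >>> 3, 10 & 7);                     // 1 2 -> field number 1, wire type 2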
Reserved tag numbers may not be used by - * fields or extension ranges in the same message. Reserved ranges may - * not overlap. - */ -export interface DescriptorProto_ReservedRange { - /** Inclusive. */ - start: number; - /** Exclusive. */ - end: number; -} - -export interface ExtensionRangeOptions { - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -/** Describes a field within a message. */ -export interface FieldDescriptorProto { - name: string; - number: number; - label: FieldDescriptorProto_Label; - /** - * If type_name is set, this need not be set. If both this and type_name - * are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - */ - type: FieldDescriptorProto_Type; - /** - * For message and enum types, this is the name of the type. If the name - * starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - * rules are used to find the type (i.e. first the nested types within this - * message are searched, then within the parent, on up to the root - * namespace). - */ - typeName: string; - /** - * For extensions, this is the name of the type being extended. It is - * resolved in the same manner as type_name. - */ - extendee: string; - /** - * For numeric types, contains the original text representation of the value. - * For booleans, "true" or "false". - * For strings, contains the default text contents (not escaped in any way). - * For bytes, contains the C escaped value. All bytes >= 128 are escaped. - */ - defaultValue: string; - /** - * If set, gives the index of a oneof in the containing type's oneof_decl - * list. This field is a member of that oneof. - */ - oneofIndex: number; - /** - * JSON name of this field. The value is set by protocol compiler. If the - * user has set a "json_name" option on this field, that option's value - * will be used. Otherwise, it's deduced from the field's name by converting - * it to camelCase. - */ - jsonName: string; - options: - | FieldOptions - | undefined; - /** - * If true, this is a proto3 "optional". When a proto3 field is optional, it - * tracks presence regardless of field type. - * - * When proto3_optional is true, this field must be belong to a oneof to - * signal to old proto3 clients that presence is tracked for this field. This - * oneof is known as a "synthetic" oneof, and this field must be its sole - * member (each proto3 optional field gets its own synthetic oneof). Synthetic - * oneofs exist in the descriptor only, and do not generate any API. Synthetic - * oneofs must be ordered after all "real" oneofs. - * - * For message fields, proto3_optional doesn't create any semantic change, - * since non-repeated message fields always track presence. However it still - * indicates the semantic detail of whether the user wrote "optional" or not. - * This can be useful for round-tripping the .proto file. For consistency we - * give message fields a synthetic oneof also, even though it is not required - * to track presence. This is especially important because the parser can't - * tell if a field is a message or an enum, so it must always create a - * synthetic oneof. - * - * Proto2 optional fields do not set this flag, because they already indicate - * optional with `LABEL_OPTIONAL`. - */ - proto3Optional: boolean; -} - -export enum FieldDescriptorProto_Type { - /** - * TYPE_DOUBLE - 0 is reserved for errors. - * Order is weird for historical reasons. 
- */ - TYPE_DOUBLE = 1, - TYPE_FLOAT = 2, - /** - * TYPE_INT64 - Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - * negative values are likely. - */ - TYPE_INT64 = 3, - TYPE_UINT64 = 4, - /** - * TYPE_INT32 - Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - * negative values are likely. - */ - TYPE_INT32 = 5, - TYPE_FIXED64 = 6, - TYPE_FIXED32 = 7, - TYPE_BOOL = 8, - TYPE_STRING = 9, - /** - * TYPE_GROUP - Tag-delimited aggregate. - * Group type is deprecated and not supported in proto3. However, Proto3 - * implementations should still be able to parse the group wire format and - * treat group fields as unknown fields. - */ - TYPE_GROUP = 10, - /** TYPE_MESSAGE - Length-delimited aggregate. */ - TYPE_MESSAGE = 11, - /** TYPE_BYTES - New in version 2. */ - TYPE_BYTES = 12, - TYPE_UINT32 = 13, - TYPE_ENUM = 14, - TYPE_SFIXED32 = 15, - TYPE_SFIXED64 = 16, - /** TYPE_SINT32 - Uses ZigZag encoding. */ - TYPE_SINT32 = 17, - /** TYPE_SINT64 - Uses ZigZag encoding. */ - TYPE_SINT64 = 18, - UNRECOGNIZED = -1, -} - -export function fieldDescriptorProto_TypeFromJSON(object: any): FieldDescriptorProto_Type { - switch (object) { - case 1: - case "TYPE_DOUBLE": - return FieldDescriptorProto_Type.TYPE_DOUBLE; - case 2: - case "TYPE_FLOAT": - return FieldDescriptorProto_Type.TYPE_FLOAT; - case 3: - case "TYPE_INT64": - return FieldDescriptorProto_Type.TYPE_INT64; - case 4: - case "TYPE_UINT64": - return FieldDescriptorProto_Type.TYPE_UINT64; - case 5: - case "TYPE_INT32": - return FieldDescriptorProto_Type.TYPE_INT32; - case 6: - case "TYPE_FIXED64": - return FieldDescriptorProto_Type.TYPE_FIXED64; - case 7: - case "TYPE_FIXED32": - return FieldDescriptorProto_Type.TYPE_FIXED32; - case 8: - case "TYPE_BOOL": - return FieldDescriptorProto_Type.TYPE_BOOL; - case 9: - case "TYPE_STRING": - return FieldDescriptorProto_Type.TYPE_STRING; - case 10: - case "TYPE_GROUP": - return FieldDescriptorProto_Type.TYPE_GROUP; - case 11: - case "TYPE_MESSAGE": - return FieldDescriptorProto_Type.TYPE_MESSAGE; - case 12: - case "TYPE_BYTES": - return FieldDescriptorProto_Type.TYPE_BYTES; - case 13: - case "TYPE_UINT32": - return FieldDescriptorProto_Type.TYPE_UINT32; - case 14: - case "TYPE_ENUM": - return FieldDescriptorProto_Type.TYPE_ENUM; - case 15: - case "TYPE_SFIXED32": - return FieldDescriptorProto_Type.TYPE_SFIXED32; - case 16: - case "TYPE_SFIXED64": - return FieldDescriptorProto_Type.TYPE_SFIXED64; - case 17: - case "TYPE_SINT32": - return FieldDescriptorProto_Type.TYPE_SINT32; - case 18: - case "TYPE_SINT64": - return FieldDescriptorProto_Type.TYPE_SINT64; - case -1: - case "UNRECOGNIZED": - default: - return FieldDescriptorProto_Type.UNRECOGNIZED; - } -} - -export function fieldDescriptorProto_TypeToJSON(object: FieldDescriptorProto_Type): string { - switch (object) { - case FieldDescriptorProto_Type.TYPE_DOUBLE: - return "TYPE_DOUBLE"; - case FieldDescriptorProto_Type.TYPE_FLOAT: - return "TYPE_FLOAT"; - case FieldDescriptorProto_Type.TYPE_INT64: - return "TYPE_INT64"; - case FieldDescriptorProto_Type.TYPE_UINT64: - return "TYPE_UINT64"; - case FieldDescriptorProto_Type.TYPE_INT32: - return "TYPE_INT32"; - case FieldDescriptorProto_Type.TYPE_FIXED64: - return "TYPE_FIXED64"; - case FieldDescriptorProto_Type.TYPE_FIXED32: - return "TYPE_FIXED32"; - case FieldDescriptorProto_Type.TYPE_BOOL: - return "TYPE_BOOL"; - case FieldDescriptorProto_Type.TYPE_STRING: - return "TYPE_STRING"; - case FieldDescriptorProto_Type.TYPE_GROUP: - return "TYPE_GROUP"; - case 
FieldDescriptorProto_Type.TYPE_MESSAGE: - return "TYPE_MESSAGE"; - case FieldDescriptorProto_Type.TYPE_BYTES: - return "TYPE_BYTES"; - case FieldDescriptorProto_Type.TYPE_UINT32: - return "TYPE_UINT32"; - case FieldDescriptorProto_Type.TYPE_ENUM: - return "TYPE_ENUM"; - case FieldDescriptorProto_Type.TYPE_SFIXED32: - return "TYPE_SFIXED32"; - case FieldDescriptorProto_Type.TYPE_SFIXED64: - return "TYPE_SFIXED64"; - case FieldDescriptorProto_Type.TYPE_SINT32: - return "TYPE_SINT32"; - case FieldDescriptorProto_Type.TYPE_SINT64: - return "TYPE_SINT64"; - case FieldDescriptorProto_Type.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export enum FieldDescriptorProto_Label { - /** LABEL_OPTIONAL - 0 is reserved for errors */ - LABEL_OPTIONAL = 1, - LABEL_REQUIRED = 2, - LABEL_REPEATED = 3, - UNRECOGNIZED = -1, -} - -export function fieldDescriptorProto_LabelFromJSON(object: any): FieldDescriptorProto_Label { - switch (object) { - case 1: - case "LABEL_OPTIONAL": - return FieldDescriptorProto_Label.LABEL_OPTIONAL; - case 2: - case "LABEL_REQUIRED": - return FieldDescriptorProto_Label.LABEL_REQUIRED; - case 3: - case "LABEL_REPEATED": - return FieldDescriptorProto_Label.LABEL_REPEATED; - case -1: - case "UNRECOGNIZED": - default: - return FieldDescriptorProto_Label.UNRECOGNIZED; - } -} - -export function fieldDescriptorProto_LabelToJSON(object: FieldDescriptorProto_Label): string { - switch (object) { - case FieldDescriptorProto_Label.LABEL_OPTIONAL: - return "LABEL_OPTIONAL"; - case FieldDescriptorProto_Label.LABEL_REQUIRED: - return "LABEL_REQUIRED"; - case FieldDescriptorProto_Label.LABEL_REPEATED: - return "LABEL_REPEATED"; - case FieldDescriptorProto_Label.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Describes a oneof. */ -export interface OneofDescriptorProto { - name: string; - options: OneofOptions | undefined; -} - -/** Describes an enum type. */ -export interface EnumDescriptorProto { - name: string; - value: EnumValueDescriptorProto[]; - options: - | EnumOptions - | undefined; - /** - * Range of reserved numeric values. Reserved numeric values may not be used - * by enum values in the same enum declaration. Reserved ranges may not - * overlap. - */ - reservedRange: EnumDescriptorProto_EnumReservedRange[]; - /** - * Reserved enum value names, which may not be reused. A given name may only - * be reserved once. - */ - reservedName: string[]; -} - -/** - * Range of reserved numeric values. Reserved values may not be used by - * entries in the same enum. Reserved ranges may not overlap. - * - * Note that this is distinct from DescriptorProto.ReservedRange in that it - * is inclusive such that it can appropriately represent the entire int32 - * domain. - */ -export interface EnumDescriptorProto_EnumReservedRange { - /** Inclusive. */ - start: number; - /** Inclusive. */ - end: number; -} - -/** Describes a value within an enum. */ -export interface EnumValueDescriptorProto { - name: string; - number: number; - options: EnumValueOptions | undefined; -} - -/** Describes a service. */ -export interface ServiceDescriptorProto { - name: string; - method: MethodDescriptorProto[]; - options: ServiceOptions | undefined; -} - -/** Describes a method of a service. */ -export interface MethodDescriptorProto { - name: string; - /** - * Input and output type names. These are resolved in the same way as - * FieldDescriptorProto.type_name, but must refer to a message type. 
- */ - inputType: string; - outputType: string; - options: - | MethodOptions - | undefined; - /** Identifies if client streams multiple client messages */ - clientStreaming: boolean; - /** Identifies if server streams multiple server messages */ - serverStreaming: boolean; -} - -export interface FileOptions { - /** - * Sets the Java package where classes generated from this .proto will be - * placed. By default, the proto package is used, but this is often - * inappropriate because proto packages do not normally start with backwards - * domain names. - */ - javaPackage: string; - /** - * Controls the name of the wrapper Java class generated for the .proto file. - * That class will always contain the .proto file's getDescriptor() method as - * well as any top-level extensions defined in the .proto file. - * If java_multiple_files is disabled, then all the other classes from the - * .proto file will be nested inside the single wrapper outer class. - */ - javaOuterClassname: string; - /** - * If enabled, then the Java code generator will generate a separate .java - * file for each top-level message, enum, and service defined in the .proto - * file. Thus, these types will *not* be nested inside the wrapper class - * named by java_outer_classname. However, the wrapper class will still be - * generated to contain the file's getDescriptor() method as well as any - * top-level extensions defined in the file. - */ - javaMultipleFiles: boolean; - /** - * This option does nothing. - * - * @deprecated - */ - javaGenerateEqualsAndHash: boolean; - /** - * If set true, then the Java2 code generator will generate code that - * throws an exception whenever an attempt is made to assign a non-UTF-8 - * byte sequence to a string field. - * Message reflection will do the same. - * However, an extension field still accepts non-UTF-8 byte sequences. - * This option has no effect on when used with the lite runtime. - */ - javaStringCheckUtf8: boolean; - optimizeFor: FileOptions_OptimizeMode; - /** - * Sets the Go package where structs generated from this .proto will be - * placed. If omitted, the Go package will be derived from the following: - * - The basename of the package import path, if provided. - * - Otherwise, the package statement in the .proto file, if present. - * - Otherwise, the basename of the .proto file, without extension. - */ - goPackage: string; - /** - * Should generic services be generated in each language? "Generic" services - * are not specific to any particular RPC system. They are generated by the - * main code generators in each language (without additional plugins). - * Generic services were the only kind of service generation supported by - * early versions of google.protobuf. - * - * Generic services are now considered deprecated in favor of using plugins - * that generate code specific to your particular RPC system. Therefore, - * these default to false. Old code which depends on generic services should - * explicitly set them to true. - */ - ccGenericServices: boolean; - javaGenericServices: boolean; - pyGenericServices: boolean; - phpGenericServices: boolean; - /** - * Is this file deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for everything in the file, or it will be completely ignored; in the very - * least, this is a formalization for deprecating files. - */ - deprecated: boolean; - /** - * Enables the use of arenas for the proto messages in this file. This applies - * only to generated classes for C++. 
- */ - ccEnableArenas: boolean; - /** - * Sets the objective c class prefix which is prepended to all objective c - * generated classes from this .proto. There is no default. - */ - objcClassPrefix: string; - /** Namespace for generated classes; defaults to the package. */ - csharpNamespace: string; - /** - * By default Swift generators will take the proto package and CamelCase it - * replacing '.' with underscore and use that to prefix the types/symbols - * defined. When this options is provided, they will use this value instead - * to prefix the types/symbols defined. - */ - swiftPrefix: string; - /** - * Sets the php class prefix which is prepended to all php generated classes - * from this .proto. Default is empty. - */ - phpClassPrefix: string; - /** - * Use this option to change the namespace of php generated classes. Default - * is empty. When this option is empty, the package name will be used for - * determining the namespace. - */ - phpNamespace: string; - /** - * Use this option to change the namespace of php generated metadata classes. - * Default is empty. When this option is empty, the proto file name will be - * used for determining the namespace. - */ - phpMetadataNamespace: string; - /** - * Use this option to change the package of ruby generated classes. Default - * is empty. When this option is not set, the package name will be used for - * determining the ruby package. - */ - rubyPackage: string; - /** - * The parser stores options it doesn't recognize here. - * See the documentation for the "Options" section above. - */ - uninterpretedOption: UninterpretedOption[]; -} - -/** Generated classes can be optimized for speed or code size. */ -export enum FileOptions_OptimizeMode { - /** SPEED - Generate complete code for parsing, serialization, */ - SPEED = 1, - /** CODE_SIZE - etc. */ - CODE_SIZE = 2, - /** LITE_RUNTIME - Generate code using MessageLite and the lite runtime. */ - LITE_RUNTIME = 3, - UNRECOGNIZED = -1, -} - -export function fileOptions_OptimizeModeFromJSON(object: any): FileOptions_OptimizeMode { - switch (object) { - case 1: - case "SPEED": - return FileOptions_OptimizeMode.SPEED; - case 2: - case "CODE_SIZE": - return FileOptions_OptimizeMode.CODE_SIZE; - case 3: - case "LITE_RUNTIME": - return FileOptions_OptimizeMode.LITE_RUNTIME; - case -1: - case "UNRECOGNIZED": - default: - return FileOptions_OptimizeMode.UNRECOGNIZED; - } -} - -export function fileOptions_OptimizeModeToJSON(object: FileOptions_OptimizeMode): string { - switch (object) { - case FileOptions_OptimizeMode.SPEED: - return "SPEED"; - case FileOptions_OptimizeMode.CODE_SIZE: - return "CODE_SIZE"; - case FileOptions_OptimizeMode.LITE_RUNTIME: - return "LITE_RUNTIME"; - case FileOptions_OptimizeMode.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface MessageOptions { - /** - * Set true to use the old proto1 MessageSet wire format for extensions. - * This is provided for backwards-compatibility with the MessageSet wire - * format. You should not use this for any other reason: It's less - * efficient, has fewer features, and is more complicated. - * - * The message must be defined exactly as follows: - * message Foo { - * option message_set_wire_format = true; - * extensions 4 to max; - * } - * Note that the message cannot have any defined fields; MessageSets only - * have extensions. - * - * All extensions of your type must be singular messages; e.g. they cannot - * be int32s, enums, or repeated messages. 
- * - * Because this is an option, the above two restrictions are not enforced by - * the protocol compiler. - */ - messageSetWireFormat: boolean; - /** - * Disables the generation of the standard "descriptor()" accessor, which can - * conflict with a field of the same name. This is meant to make migration - * from proto1 easier; new code should avoid fields named "descriptor". - */ - noStandardDescriptorAccessor: boolean; - /** - * Is this message deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for the message, or it will be completely ignored; in the very least, - * this is a formalization for deprecating messages. - */ - deprecated: boolean; - /** - * NOTE: Do not set the option in .proto files. Always use the maps syntax - * instead. The option should only be implicitly set by the proto compiler - * parser. - * - * Whether the message is an automatically generated map entry type for the - * maps field. - * - * For maps fields: - * map map_field = 1; - * The parsed descriptor looks like: - * message MapFieldEntry { - * option map_entry = true; - * optional KeyType key = 1; - * optional ValueType value = 2; - * } - * repeated MapFieldEntry map_field = 1; - * - * Implementations may choose not to generate the map_entry=true message, but - * use a native map in the target language to hold the keys and values. - * The reflection APIs in such implementations still need to work as - * if the field is a repeated message field. - */ - mapEntry: boolean; - /** - * Enable the legacy handling of JSON field name conflicts. This lowercases - * and strips underscored from the fields before comparison in proto3 only. - * The new behavior takes `json_name` into account and applies to proto2 as - * well. - * - * This should only be used as a temporary measure against broken builds due - * to the change in behavior for JSON field name conflicts. - * - * TODO(b/261750190) This is legacy behavior we plan to remove once downstream - * teams have had time to migrate. - * - * @deprecated - */ - deprecatedLegacyJsonFieldConflicts: boolean; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export interface FieldOptions { - /** - * The ctype option instructs the C++ code generator to use a different - * representation of the field than it normally would. See the specific - * options below. This option is not yet implemented in the open source - * release -- sorry, we'll try to include it in a future version! - */ - ctype: FieldOptions_CType; - /** - * The packed option can be enabled for repeated primitive fields to enable - * a more efficient representation on the wire. Rather than repeatedly - * writing the tag and type for each element, the entire array is encoded as - * a single length-delimited blob. In proto3, only explicit setting it to - * false will avoid using packed encoding. - */ - packed: boolean; - /** - * The jstype option determines the JavaScript type used for values of the - * field. The option is permitted only for 64 bit integral and fixed types - * (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - * is represented as JavaScript string, which avoids loss of precision that - * can happen when a large value is converted to a floating point JavaScript. - * Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - * use the JavaScript "number" type. The behavior of the default option - * JS_NORMAL is implementation dependent. 
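// Why the jstype option described above matters: JavaScript numbers are
// IEEE-754 doubles, so int64/uint64 values beyond Number.MAX_SAFE_INTEGER
// silently lose precision unless they are surfaced as strings (JS_STRING) or,
// with some code generators, as bigint. Quick illustration:
const asNumber = Number("9007199254740993");  // 2^53 + 1, e.g. an int64 value from JSON
console.log(asNumber);                        // 9007199254740992 -- off by one
console.log(BigInt("9007199254740993"));      // 9007199254740993n -- exact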
- * - * This option is an enum to permit additional types to be added, e.g. - * goog.math.Integer. - */ - jstype: FieldOptions_JSType; - /** - * Should this field be parsed lazily? Lazy applies only to message-type - * fields. It means that when the outer message is initially parsed, the - * inner message's contents will not be parsed but instead stored in encoded - * form. The inner message will actually be parsed when it is first accessed. - * - * This is only a hint. Implementations are free to choose whether to use - * eager or lazy parsing regardless of the value of this option. However, - * setting this option true suggests that the protocol author believes that - * using lazy parsing on this field is worth the additional bookkeeping - * overhead typically needed to implement it. - * - * This option does not affect the public interface of any generated code; - * all method signatures remain the same. Furthermore, thread-safety of the - * interface is not affected by this option; const methods remain safe to - * call from multiple threads concurrently, while non-const methods continue - * to require exclusive access. - * - * Note that implementations may choose not to check required fields within - * a lazy sub-message. That is, calling IsInitialized() on the outer message - * may return true even if the inner message has missing required fields. - * This is necessary because otherwise the inner message would have to be - * parsed in order to perform the check, defeating the purpose of lazy - * parsing. An implementation which chooses not to check required fields - * must be consistent about it. That is, for any particular sub-message, the - * implementation must either *always* check its required fields, or *never* - * check its required fields, regardless of whether or not the message has - * been parsed. - * - * As of May 2022, lazy verifies the contents of the byte stream during - * parsing. An invalid byte stream will cause the overall parsing to fail. - */ - lazy: boolean; - /** - * unverified_lazy does no correctness checks on the byte stream. This should - * only be used where lazy with verification is prohibitive for performance - * reasons. - */ - unverifiedLazy: boolean; - /** - * Is this field deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for accessors, or it will be completely ignored; in the very least, this - * is a formalization for deprecating fields. - */ - deprecated: boolean; - /** For Google-internal migration only. Do not use. */ - weak: boolean; - /** - * Indicate that the field value should not be printed out when using debug - * formats, e.g. when the field contains sensitive credentials. - */ - debugRedact: boolean; - retention: FieldOptions_OptionRetention; - target: FieldOptions_OptionTargetType; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export enum FieldOptions_CType { - /** STRING - Default mode. 
*/ - STRING = 0, - CORD = 1, - STRING_PIECE = 2, - UNRECOGNIZED = -1, -} - -export function fieldOptions_CTypeFromJSON(object: any): FieldOptions_CType { - switch (object) { - case 0: - case "STRING": - return FieldOptions_CType.STRING; - case 1: - case "CORD": - return FieldOptions_CType.CORD; - case 2: - case "STRING_PIECE": - return FieldOptions_CType.STRING_PIECE; - case -1: - case "UNRECOGNIZED": - default: - return FieldOptions_CType.UNRECOGNIZED; - } -} - -export function fieldOptions_CTypeToJSON(object: FieldOptions_CType): string { - switch (object) { - case FieldOptions_CType.STRING: - return "STRING"; - case FieldOptions_CType.CORD: - return "CORD"; - case FieldOptions_CType.STRING_PIECE: - return "STRING_PIECE"; - case FieldOptions_CType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export enum FieldOptions_JSType { - /** JS_NORMAL - Use the default type. */ - JS_NORMAL = 0, - /** JS_STRING - Use JavaScript strings. */ - JS_STRING = 1, - /** JS_NUMBER - Use JavaScript numbers. */ - JS_NUMBER = 2, - UNRECOGNIZED = -1, -} - -export function fieldOptions_JSTypeFromJSON(object: any): FieldOptions_JSType { - switch (object) { - case 0: - case "JS_NORMAL": - return FieldOptions_JSType.JS_NORMAL; - case 1: - case "JS_STRING": - return FieldOptions_JSType.JS_STRING; - case 2: - case "JS_NUMBER": - return FieldOptions_JSType.JS_NUMBER; - case -1: - case "UNRECOGNIZED": - default: - return FieldOptions_JSType.UNRECOGNIZED; - } -} - -export function fieldOptions_JSTypeToJSON(object: FieldOptions_JSType): string { - switch (object) { - case FieldOptions_JSType.JS_NORMAL: - return "JS_NORMAL"; - case FieldOptions_JSType.JS_STRING: - return "JS_STRING"; - case FieldOptions_JSType.JS_NUMBER: - return "JS_NUMBER"; - case FieldOptions_JSType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** - * If set to RETENTION_SOURCE, the option will be omitted from the binary. - * Note: as of January 2023, support for this is in progress and does not yet - * have an effect (b/264593489). - */ -export enum FieldOptions_OptionRetention { - RETENTION_UNKNOWN = 0, - RETENTION_RUNTIME = 1, - RETENTION_SOURCE = 2, - UNRECOGNIZED = -1, -} - -export function fieldOptions_OptionRetentionFromJSON(object: any): FieldOptions_OptionRetention { - switch (object) { - case 0: - case "RETENTION_UNKNOWN": - return FieldOptions_OptionRetention.RETENTION_UNKNOWN; - case 1: - case "RETENTION_RUNTIME": - return FieldOptions_OptionRetention.RETENTION_RUNTIME; - case 2: - case "RETENTION_SOURCE": - return FieldOptions_OptionRetention.RETENTION_SOURCE; - case -1: - case "UNRECOGNIZED": - default: - return FieldOptions_OptionRetention.UNRECOGNIZED; - } -} - -export function fieldOptions_OptionRetentionToJSON(object: FieldOptions_OptionRetention): string { - switch (object) { - case FieldOptions_OptionRetention.RETENTION_UNKNOWN: - return "RETENTION_UNKNOWN"; - case FieldOptions_OptionRetention.RETENTION_RUNTIME: - return "RETENTION_RUNTIME"; - case FieldOptions_OptionRetention.RETENTION_SOURCE: - return "RETENTION_SOURCE"; - case FieldOptions_OptionRetention.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** - * This indicates the types of entities that the field may apply to when used - * as an option. If it is unset, then the field may be freely used as an - * option on any kind of entity. Note: as of January 2023, support for this is - * in progress and does not yet have an effect (b/264593489). 
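// The *FromJSON/*ToJSON helpers above all follow the same pattern: accept
// either the numeric enum value or its string name, and fall back to
// UNRECOGNIZED (-1) for anything else. A generic sketch of that pattern; the
// helper name and signature here are illustrative, not part of the generated API.
enum Retention {
  RETENTION_UNKNOWN = 0,
  RETENTION_RUNTIME = 1,
  RETENTION_SOURCE = 2,
  UNRECOGNIZED = -1,
}

function retentionFromJSON(value: unknown): Retention {
  switch (value) {
    case 0:
    case "RETENTION_UNKNOWN":
      return Retention.RETENTION_UNKNOWN;
    case 1:
    case "RETENTION_RUNTIME":
      return Retention.RETENTION_RUNTIME;
    case 2:
    case "RETENTION_SOURCE":
      return Retention.RETENTION_SOURCE;
    default:
      return Retention.UNRECOGNIZED;
  }
}

console.log(retentionFromJSON("RETENTION_SOURCE") === retentionFromJSON(2)); // true
console.log(retentionFromJSON("bogus"));                                     // -1 (UNRECOGNIZED)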
- */ -export enum FieldOptions_OptionTargetType { - TARGET_TYPE_UNKNOWN = 0, - TARGET_TYPE_FILE = 1, - TARGET_TYPE_EXTENSION_RANGE = 2, - TARGET_TYPE_MESSAGE = 3, - TARGET_TYPE_FIELD = 4, - TARGET_TYPE_ONEOF = 5, - TARGET_TYPE_ENUM = 6, - TARGET_TYPE_ENUM_ENTRY = 7, - TARGET_TYPE_SERVICE = 8, - TARGET_TYPE_METHOD = 9, - UNRECOGNIZED = -1, -} - -export function fieldOptions_OptionTargetTypeFromJSON(object: any): FieldOptions_OptionTargetType { - switch (object) { - case 0: - case "TARGET_TYPE_UNKNOWN": - return FieldOptions_OptionTargetType.TARGET_TYPE_UNKNOWN; - case 1: - case "TARGET_TYPE_FILE": - return FieldOptions_OptionTargetType.TARGET_TYPE_FILE; - case 2: - case "TARGET_TYPE_EXTENSION_RANGE": - return FieldOptions_OptionTargetType.TARGET_TYPE_EXTENSION_RANGE; - case 3: - case "TARGET_TYPE_MESSAGE": - return FieldOptions_OptionTargetType.TARGET_TYPE_MESSAGE; - case 4: - case "TARGET_TYPE_FIELD": - return FieldOptions_OptionTargetType.TARGET_TYPE_FIELD; - case 5: - case "TARGET_TYPE_ONEOF": - return FieldOptions_OptionTargetType.TARGET_TYPE_ONEOF; - case 6: - case "TARGET_TYPE_ENUM": - return FieldOptions_OptionTargetType.TARGET_TYPE_ENUM; - case 7: - case "TARGET_TYPE_ENUM_ENTRY": - return FieldOptions_OptionTargetType.TARGET_TYPE_ENUM_ENTRY; - case 8: - case "TARGET_TYPE_SERVICE": - return FieldOptions_OptionTargetType.TARGET_TYPE_SERVICE; - case 9: - case "TARGET_TYPE_METHOD": - return FieldOptions_OptionTargetType.TARGET_TYPE_METHOD; - case -1: - case "UNRECOGNIZED": - default: - return FieldOptions_OptionTargetType.UNRECOGNIZED; - } -} - -export function fieldOptions_OptionTargetTypeToJSON(object: FieldOptions_OptionTargetType): string { - switch (object) { - case FieldOptions_OptionTargetType.TARGET_TYPE_UNKNOWN: - return "TARGET_TYPE_UNKNOWN"; - case FieldOptions_OptionTargetType.TARGET_TYPE_FILE: - return "TARGET_TYPE_FILE"; - case FieldOptions_OptionTargetType.TARGET_TYPE_EXTENSION_RANGE: - return "TARGET_TYPE_EXTENSION_RANGE"; - case FieldOptions_OptionTargetType.TARGET_TYPE_MESSAGE: - return "TARGET_TYPE_MESSAGE"; - case FieldOptions_OptionTargetType.TARGET_TYPE_FIELD: - return "TARGET_TYPE_FIELD"; - case FieldOptions_OptionTargetType.TARGET_TYPE_ONEOF: - return "TARGET_TYPE_ONEOF"; - case FieldOptions_OptionTargetType.TARGET_TYPE_ENUM: - return "TARGET_TYPE_ENUM"; - case FieldOptions_OptionTargetType.TARGET_TYPE_ENUM_ENTRY: - return "TARGET_TYPE_ENUM_ENTRY"; - case FieldOptions_OptionTargetType.TARGET_TYPE_SERVICE: - return "TARGET_TYPE_SERVICE"; - case FieldOptions_OptionTargetType.TARGET_TYPE_METHOD: - return "TARGET_TYPE_METHOD"; - case FieldOptions_OptionTargetType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface OneofOptions { - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export interface EnumOptions { - /** - * Set this option to true to allow mapping different tag names to the same - * value. - */ - allowAlias: boolean; - /** - * Is this enum deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for the enum, or it will be completely ignored; in the very least, this - * is a formalization for deprecating enums. - */ - deprecated: boolean; - /** - * Enable the legacy handling of JSON field name conflicts. This lowercases - * and strips underscored from the fields before comparison in proto3 only. - * The new behavior takes `json_name` into account and applies to proto2 as - * well. 
- * TODO(b/261750190) Remove this legacy behavior once downstream teams have - * had time to migrate. - * - * @deprecated - */ - deprecatedLegacyJsonFieldConflicts: boolean; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export interface EnumValueOptions { - /** - * Is this enum value deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for the enum value, or it will be completely ignored; in the very least, - * this is a formalization for deprecating enum values. - */ - deprecated: boolean; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export interface ServiceOptions { - /** - * Is this service deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for the service, or it will be completely ignored; in the very least, - * this is a formalization for deprecating services. - */ - deprecated: boolean; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -export interface MethodOptions { - /** - * Is this method deprecated? - * Depending on the target platform, this can emit Deprecated annotations - * for the method, or it will be completely ignored; in the very least, - * this is a formalization for deprecating methods. - */ - deprecated: boolean; - idempotencyLevel: MethodOptions_IdempotencyLevel; - /** The parser stores options it doesn't recognize here. See above. */ - uninterpretedOption: UninterpretedOption[]; -} - -/** - * Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - * or neither? HTTP based RPC implementation may choose GET verb for safe - * methods, and PUT verb for idempotent methods instead of the default POST. - */ -export enum MethodOptions_IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0, - /** NO_SIDE_EFFECTS - implies idempotent */ - NO_SIDE_EFFECTS = 1, - /** IDEMPOTENT - idempotent, but may have side effects */ - IDEMPOTENT = 2, - UNRECOGNIZED = -1, -} - -export function methodOptions_IdempotencyLevelFromJSON(object: any): MethodOptions_IdempotencyLevel { - switch (object) { - case 0: - case "IDEMPOTENCY_UNKNOWN": - return MethodOptions_IdempotencyLevel.IDEMPOTENCY_UNKNOWN; - case 1: - case "NO_SIDE_EFFECTS": - return MethodOptions_IdempotencyLevel.NO_SIDE_EFFECTS; - case 2: - case "IDEMPOTENT": - return MethodOptions_IdempotencyLevel.IDEMPOTENT; - case -1: - case "UNRECOGNIZED": - default: - return MethodOptions_IdempotencyLevel.UNRECOGNIZED; - } -} - -export function methodOptions_IdempotencyLevelToJSON(object: MethodOptions_IdempotencyLevel): string { - switch (object) { - case MethodOptions_IdempotencyLevel.IDEMPOTENCY_UNKNOWN: - return "IDEMPOTENCY_UNKNOWN"; - case MethodOptions_IdempotencyLevel.NO_SIDE_EFFECTS: - return "NO_SIDE_EFFECTS"; - case MethodOptions_IdempotencyLevel.IDEMPOTENT: - return "IDEMPOTENT"; - case MethodOptions_IdempotencyLevel.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** - * A message representing a option the parser does not recognize. This only - * appears in options protos created by the compiler::Parser class. - * DescriptorPool resolves these when building Descriptor objects. Therefore, - * options protos in descriptor objects (e.g. returned by Descriptor::options(), - * or produced by Descriptor::CopyTo()) will never have UninterpretedOptions - * in them. 
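Note how every generated enum above ships a converter pair: the `...FromJSON` helper accepts either the numeric wire value or the canonical string name and collapses anything else to `UNRECOGNIZED` (-1), while `...ToJSON` always emits the string name. A minimal usage sketch against the deleted `methodOptions_IdempotencyLevel*` helpers (the "./descriptor" import path is an assumption for illustration):

import {
  MethodOptions_IdempotencyLevel,
  methodOptions_IdempotencyLevelFromJSON,
  methodOptions_IdempotencyLevelToJSON,
} from "./descriptor"; // assumed module path

// Both spellings decode to the same enum member.
const fromNumber = methodOptions_IdempotencyLevelFromJSON(1);
const fromString = methodOptions_IdempotencyLevelFromJSON("NO_SIDE_EFFECTS");
console.log(fromNumber === fromString); // true

// Unknown inputs fall back to UNRECOGNIZED (-1) instead of throwing.
const unknown = methodOptions_IdempotencyLevelFromJSON("SOMETHING_NEW");
console.log(unknown === MethodOptions_IdempotencyLevel.UNRECOGNIZED); // true

// toJSON always emits the canonical string name.
console.log(methodOptions_IdempotencyLevelToJSON(fromNumber)); // "NO_SIDE_EFFECTS"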
- */ -export interface UninterpretedOption { - name: UninterpretedOption_NamePart[]; - /** - * The value of the uninterpreted option, in whatever type the tokenizer - * identified it as during parsing. Exactly one of these should be set. - */ - identifierValue: string; - positiveIntValue: number; - negativeIntValue: number; - doubleValue: number; - stringValue: Uint8Array; - aggregateValue: string; -} - -/** - * The name of the uninterpreted option. Each string represents a segment in - * a dot-separated name. is_extension is true iff a segment represents an - * extension (denoted with parentheses in options specs in .proto files). - * E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents - * "foo.(bar.baz).moo". - */ -export interface UninterpretedOption_NamePart { - namePart: string; - isExtension: boolean; -} - -/** - * Encapsulates information about the original source file from which a - * FileDescriptorProto was generated. - */ -export interface SourceCodeInfo { - /** - * A Location identifies a piece of source code in a .proto file which - * corresponds to a particular definition. This information is intended - * to be useful to IDEs, code indexers, documentation generators, and similar - * tools. - * - * For example, say we have a file like: - * message Foo { - * optional string foo = 1; - * } - * Let's look at just the field definition: - * optional string foo = 1; - * ^ ^^ ^^ ^ ^^^ - * a bc de f ghi - * We have the following locations: - * span path represents - * [a,i) [ 4, 0, 2, 0 ] The whole field definition. - * [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - * [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - * [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - * [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - * - * Notes: - * - A location may refer to a repeated field itself (i.e. not to any - * particular index within it). This is used whenever a set of elements are - * logically enclosed in a single code segment. For example, an entire - * extend block (possibly containing multiple extension definitions) will - * have an outer location whose path refers to the "extensions" repeated - * field without an index. - * - Multiple locations may have the same path. This happens when a single - * logical declaration is spread out across multiple places. The most - * obvious example is the "extend" block again -- there may be multiple - * extend blocks in the same scope, each of which will have the same path. - * - A location's span is not always a subset of its parent's span. For - * example, the "extendee" of an extension declaration appears at the - * beginning of the "extend" block and is shared by all extensions within - * the block. - * - Just because a location's span is a subset of some other location's span - * does not mean that it is a descendant. For example, a "group" defines - * both a type and a field in a single declaration. Thus, the locations - * corresponding to the type and field and their components will overlap. - * - Code which tries to interpret locations should probably be designed to - * ignore those that it doesn't understand, as more types of locations could - * be recorded in the future. - */ - location: SourceCodeInfo_Location[]; -} - -export interface SourceCodeInfo_Location { - /** - * Identifies which part of the FileDescriptorProto was defined at this - * location. - * - * Each element is a field number or an index. They form a path from - * the root FileDescriptorProto to the place where the definition occurs. 
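The `UninterpretedOption_NamePart` comment above spells out how segments compose: extension segments are wrapped in parentheses when rendering the dotted option name. A small sketch of that rule (the `formatOptionName` helper is hypothetical, written only to mirror the deleted doc comment):

import { UninterpretedOption_NamePart } from "./descriptor"; // assumed module path

// Rebuild "foo.(bar.baz).moo" from its NamePart segments, per the comment above.
function formatOptionName(parts: UninterpretedOption_NamePart[]): string {
  return parts
    .map((p) => (p.isExtension ? `(${p.namePart})` : p.namePart))
    .join(".");
}

const name = formatOptionName([
  { namePart: "foo", isExtension: false },
  { namePart: "bar.baz", isExtension: true },
  { namePart: "moo", isExtension: false },
]);
console.log(name); // "foo.(bar.baz).moo"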
- * For example, this path: - * [ 4, 3, 2, 7, 1 ] - * refers to: - * file.message_type(3) // 4, 3 - * .field(7) // 2, 7 - * .name() // 1 - * This is because FileDescriptorProto.message_type has field number 4: - * repeated DescriptorProto message_type = 4; - * and DescriptorProto.field has field number 2: - * repeated FieldDescriptorProto field = 2; - * and FieldDescriptorProto.name has field number 1: - * optional string name = 1; - * - * Thus, the above path gives the location of a field name. If we removed - * the last element: - * [ 4, 3, 2, 7 ] - * this path refers to the whole field declaration (from the beginning - * of the label to the terminating semicolon). - */ - path: number[]; - /** - * Always has exactly three or four elements: start line, start column, - * end line (optional, otherwise assumed same as start line), end column. - * These are packed into a single field for efficiency. Note that line - * and column numbers are zero-based -- typically you will want to add - * 1 to each before displaying to a user. - */ - span: number[]; - /** - * If this SourceCodeInfo represents a complete declaration, these are any - * comments appearing before and after the declaration which appear to be - * attached to the declaration. - * - * A series of line comments appearing on consecutive lines, with no other - * tokens appearing on those lines, will be treated as a single comment. - * - * leading_detached_comments will keep paragraphs of comments that appear - * before (but not connected to) the current element. Each paragraph, - * separated by empty lines, will be one comment element in the repeated - * field. - * - * Only the comment content is provided; comment markers (e.g. //) are - * stripped out. For block comments, leading whitespace and an asterisk - * will be stripped from the beginning of each line other than the first. - * Newlines are included in the output. - * - * Examples: - * - * optional int32 foo = 1; // Comment attached to foo. - * // Comment attached to bar. - * optional int32 bar = 2; - * - * optional string baz = 3; - * // Comment attached to baz. - * // Another line attached to baz. - * - * // Comment attached to moo. - * // - * // Another line attached to moo. - * optional double moo = 4; - * - * // Detached comment for corge. This is not leading or trailing comments - * // to moo or corge because there are blank lines separating it from - * // both. - * - * // Detached comment for corge paragraph 2. - * - * optional string corge = 5; - * /* Block comment attached - * * to corge. Leading asterisks - * * will be removed. * / - * /* Block comment attached to - * * grault. * / - * optional int32 grault = 6; - * - * // ignored detached comments. - */ - leadingComments: string; - trailingComments: string; - leadingDetachedComments: string[]; -} - -/** - * Describes the relationship between generated code and its original source - * file. A GeneratedCodeInfo message is associated with only one generated - * source file, but may contain references to different source .proto files. - */ -export interface GeneratedCodeInfo { - /** - * An Annotation connects some span of text in generated code to an element - * of its generating .proto file. - */ - annotation: GeneratedCodeInfo_Annotation[]; -} - -export interface GeneratedCodeInfo_Annotation { - /** - * Identifies the element in the original source .proto file. This field - * is formatted the same as SourceCodeInfo.Location.path. - */ - path: number[]; - /** Identifies the filesystem path to the original source .proto. 
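The `path` numbers alternate between field numbers and repeated-field indexes, exactly as the worked example in the comment shows. A hedged sketch that follows the `[ 4, 3, 2, 7, 1 ]` example against a `FileDescriptorProto`, plus the span convention (helper names are invented for illustration):

import { FileDescriptorProto } from "./descriptor"; // assumed module path

// Follow the [4, 3, 2, 7, 1] example from the comment above:
//   4 = FileDescriptorProto.message_type, index 3
//   2 = DescriptorProto.field, index 7
//   1 = FieldDescriptorProto.name
function nameAtExamplePath(file: FileDescriptorProto): string | undefined {
  return file.messageType[3]?.field[7]?.name;
}

// Spans are zero-based [startLine, startCol, endLine?, endCol]; add 1 before display.
function spanForDisplay(span: number[]): string {
  const [line, col] = span;
  return `line ${line + 1}, column ${col + 1}`;
}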
*/ - sourceFile: string; - /** - * Identifies the starting offset in bytes in the generated code - * that relates to the identified object. - */ - begin: number; - /** - * Identifies the ending offset in bytes in the generated code that - * relates to the identified object. The end offset should be one past - * the last relevant byte (so the length of the text = end - begin). - */ - end: number; - semantic: GeneratedCodeInfo_Annotation_Semantic; -} - -/** - * Represents the identified object's effect on the element in the original - * .proto file. - */ -export enum GeneratedCodeInfo_Annotation_Semantic { - /** NONE - There is no effect or the effect is indescribable. */ - NONE = 0, - /** SET - The element is set or otherwise mutated. */ - SET = 1, - /** ALIAS - An alias to the element is returned. */ - ALIAS = 2, - UNRECOGNIZED = -1, -} - -export function generatedCodeInfo_Annotation_SemanticFromJSON(object: any): GeneratedCodeInfo_Annotation_Semantic { - switch (object) { - case 0: - case "NONE": - return GeneratedCodeInfo_Annotation_Semantic.NONE; - case 1: - case "SET": - return GeneratedCodeInfo_Annotation_Semantic.SET; - case 2: - case "ALIAS": - return GeneratedCodeInfo_Annotation_Semantic.ALIAS; - case -1: - case "UNRECOGNIZED": - default: - return GeneratedCodeInfo_Annotation_Semantic.UNRECOGNIZED; - } -} - -export function generatedCodeInfo_Annotation_SemanticToJSON(object: GeneratedCodeInfo_Annotation_Semantic): string { - switch (object) { - case GeneratedCodeInfo_Annotation_Semantic.NONE: - return "NONE"; - case GeneratedCodeInfo_Annotation_Semantic.SET: - return "SET"; - case GeneratedCodeInfo_Annotation_Semantic.ALIAS: - return "ALIAS"; - case GeneratedCodeInfo_Annotation_Semantic.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseFileDescriptorSet(): FileDescriptorSet { - return { file: [] }; -} - -export const FileDescriptorSet = { - encode(message: FileDescriptorSet, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.file) { - FileDescriptorProto.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FileDescriptorSet { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseFileDescriptorSet(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.file.push(FileDescriptorProto.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FileDescriptorSet { - return { file: Array.isArray(object?.file) ? object.file.map((e: any) => FileDescriptorProto.fromJSON(e)) : [] }; - }, - - toJSON(message: FileDescriptorSet): unknown { - const obj: any = {}; - if (message.file?.length) { - obj.file = message.file.map((e) => FileDescriptorProto.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): FileDescriptorSet { - return FileDescriptorSet.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): FileDescriptorSet { - const message = createBaseFileDescriptorSet(); - message.file = object.file?.map((e) => FileDescriptorProto.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseFileDescriptorProto(): FileDescriptorProto { - return { - name: "", - package: "", - dependency: [], - publicDependency: [], - weakDependency: [], - messageType: [], - enumType: [], - service: [], - extension: [], - options: undefined, - sourceCodeInfo: undefined, - syntax: "", - edition: "", - }; -} - -export const FileDescriptorProto = { - encode(message: FileDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.package !== "") { - writer.uint32(18).string(message.package); - } - for (const v of message.dependency) { - writer.uint32(26).string(v!); - } - writer.uint32(82).fork(); - for (const v of message.publicDependency) { - writer.int32(v); - } - writer.ldelim(); - writer.uint32(90).fork(); - for (const v of message.weakDependency) { - writer.int32(v); - } - writer.ldelim(); - for (const v of message.messageType) { - DescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); - } - for (const v of message.enumType) { - EnumDescriptorProto.encode(v!, writer.uint32(42).fork()).ldelim(); - } - for (const v of message.service) { - ServiceDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); - } - for (const v of message.extension) { - FieldDescriptorProto.encode(v!, writer.uint32(58).fork()).ldelim(); - } - if (message.options !== undefined) { - FileOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); - } - if (message.sourceCodeInfo !== undefined) { - SourceCodeInfo.encode(message.sourceCodeInfo, writer.uint32(74).fork()).ldelim(); - } - if (message.syntax !== "") { - writer.uint32(98).string(message.syntax); - } - if (message.edition !== "") { - writer.uint32(106).string(message.edition); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FileDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
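The `tag !== 10` style guards in every `decode()` come straight from the wire format: a tag is `(fieldNumber << 3) | wireType`, which the loops unpack with `tag >>> 3` and `tag & 7`. A quick sanity-check sketch of the constants used in the deleted encoders:

// Wire types used by this generated code: 0 = VARINT, 2 = LEN (length-delimited).
const makeTag = (fieldNumber: number, wireType: number) => (fieldNumber << 3) | wireType;

console.log(makeTag(1, 2));   // 10   -> FileDescriptorSet.file / the various `name` fields
console.log(makeTag(2, 2));   // 18   -> FileDescriptorProto.package
console.log(makeTag(999, 2)); // 7994 -> uninterpreted_option in the *Options messages

// And the inverse, as used in every decode() loop:
const tag = 7994;
console.log(tag >>> 3); // 999 (field number)
console.log(tag & 7);   // 2   (wire type LEN)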
reader.len : reader.pos + length; - const message = createBaseFileDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.package = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.dependency.push(reader.string()); - continue; - case 10: - if (tag === 80) { - message.publicDependency.push(reader.int32()); - - continue; - } - - if (tag === 82) { - const end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) { - message.publicDependency.push(reader.int32()); - } - - continue; - } - - break; - case 11: - if (tag === 88) { - message.weakDependency.push(reader.int32()); - - continue; - } - - if (tag === 90) { - const end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) { - message.weakDependency.push(reader.int32()); - } - - continue; - } - - break; - case 4: - if (tag !== 34) { - break; - } - - message.messageType.push(DescriptorProto.decode(reader, reader.uint32())); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.enumType.push(EnumDescriptorProto.decode(reader, reader.uint32())); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.service.push(ServiceDescriptorProto.decode(reader, reader.uint32())); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.extension.push(FieldDescriptorProto.decode(reader, reader.uint32())); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.options = FileOptions.decode(reader, reader.uint32()); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.sourceCodeInfo = SourceCodeInfo.decode(reader, reader.uint32()); - continue; - case 12: - if (tag !== 98) { - break; - } - - message.syntax = reader.string(); - continue; - case 13: - if (tag !== 106) { - break; - } - - message.edition = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FileDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - package: isSet(object.package) ? String(object.package) : "", - dependency: Array.isArray(object?.dependency) ? object.dependency.map((e: any) => String(e)) : [], - publicDependency: Array.isArray(object?.publicDependency) - ? object.publicDependency.map((e: any) => Number(e)) - : [], - weakDependency: Array.isArray(object?.weakDependency) ? object.weakDependency.map((e: any) => Number(e)) : [], - messageType: Array.isArray(object?.messageType) - ? object.messageType.map((e: any) => DescriptorProto.fromJSON(e)) - : [], - enumType: Array.isArray(object?.enumType) ? object.enumType.map((e: any) => EnumDescriptorProto.fromJSON(e)) : [], - service: Array.isArray(object?.service) ? object.service.map((e: any) => ServiceDescriptorProto.fromJSON(e)) : [], - extension: Array.isArray(object?.extension) - ? object.extension.map((e: any) => FieldDescriptorProto.fromJSON(e)) - : [], - options: isSet(object.options) ? FileOptions.fromJSON(object.options) : undefined, - sourceCodeInfo: isSet(object.sourceCodeInfo) ? SourceCodeInfo.fromJSON(object.sourceCodeInfo) : undefined, - syntax: isSet(object.syntax) ? String(object.syntax) : "", - edition: isSet(object.edition) ? 
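`publicDependency` (field 10) and `weakDependency` (field 11) show the dual handling of repeated scalars: tag 80/88 is a single unpacked varint, while tag 82/90 is a packed, length-delimited run drained by an inner loop. A hedged sketch of the same pattern in isolation, using protobufjs' minimal reader/writer (the `_m0` alias used throughout this generated file):

import * as _m0 from "protobufjs/minimal";

// Encode field 10 both ways; the generated decoder accepts either form.
const packed = _m0.Writer.create();
packed.uint32(82).fork();                                // tag (10 << 3) | 2 = 82, then a LEN block
[7, 8, 9].forEach((v) => packed.int32(v));
packed.ldelim();

const unpacked = _m0.Writer.create();
[7, 8, 9].forEach((v) => unpacked.uint32(80).int32(v));  // tag (10 << 3) | 0 = 80 per element

// Decode loop mirroring the generated handling of case 10 above.
function readField10(bytes: Uint8Array): number[] {
  const reader = _m0.Reader.create(bytes);
  const out: number[] = [];
  while (reader.pos < reader.len) {
    const tag = reader.uint32();
    if (tag === 80) {
      out.push(reader.int32());
    } else if (tag === 82) {
      const end = reader.uint32() + reader.pos;          // packed: length prefix, then raw varints
      while (reader.pos < end) out.push(reader.int32());
    } else {
      reader.skipType(tag & 7);
    }
  }
  return out;
}

console.log(readField10(packed.finish()));   // [7, 8, 9]
console.log(readField10(unpacked.finish())); // [7, 8, 9]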
String(object.edition) : "", - }; - }, - - toJSON(message: FileDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.package !== "") { - obj.package = message.package; - } - if (message.dependency?.length) { - obj.dependency = message.dependency; - } - if (message.publicDependency?.length) { - obj.publicDependency = message.publicDependency.map((e) => Math.round(e)); - } - if (message.weakDependency?.length) { - obj.weakDependency = message.weakDependency.map((e) => Math.round(e)); - } - if (message.messageType?.length) { - obj.messageType = message.messageType.map((e) => DescriptorProto.toJSON(e)); - } - if (message.enumType?.length) { - obj.enumType = message.enumType.map((e) => EnumDescriptorProto.toJSON(e)); - } - if (message.service?.length) { - obj.service = message.service.map((e) => ServiceDescriptorProto.toJSON(e)); - } - if (message.extension?.length) { - obj.extension = message.extension.map((e) => FieldDescriptorProto.toJSON(e)); - } - if (message.options !== undefined) { - obj.options = FileOptions.toJSON(message.options); - } - if (message.sourceCodeInfo !== undefined) { - obj.sourceCodeInfo = SourceCodeInfo.toJSON(message.sourceCodeInfo); - } - if (message.syntax !== "") { - obj.syntax = message.syntax; - } - if (message.edition !== "") { - obj.edition = message.edition; - } - return obj; - }, - - create, I>>(base?: I): FileDescriptorProto { - return FileDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): FileDescriptorProto { - const message = createBaseFileDescriptorProto(); - message.name = object.name ?? ""; - message.package = object.package ?? ""; - message.dependency = object.dependency?.map((e) => e) || []; - message.publicDependency = object.publicDependency?.map((e) => e) || []; - message.weakDependency = object.weakDependency?.map((e) => e) || []; - message.messageType = object.messageType?.map((e) => DescriptorProto.fromPartial(e)) || []; - message.enumType = object.enumType?.map((e) => EnumDescriptorProto.fromPartial(e)) || []; - message.service = object.service?.map((e) => ServiceDescriptorProto.fromPartial(e)) || []; - message.extension = object.extension?.map((e) => FieldDescriptorProto.fromPartial(e)) || []; - message.options = (object.options !== undefined && object.options !== null) - ? FileOptions.fromPartial(object.options) - : undefined; - message.sourceCodeInfo = (object.sourceCodeInfo !== undefined && object.sourceCodeInfo !== null) - ? SourceCodeInfo.fromPartial(object.sourceCodeInfo) - : undefined; - message.syntax = object.syntax ?? ""; - message.edition = object.edition ?? 
""; - return message; - }, -}; - -function createBaseDescriptorProto(): DescriptorProto { - return { - name: "", - field: [], - extension: [], - nestedType: [], - enumType: [], - extensionRange: [], - oneofDecl: [], - options: undefined, - reservedRange: [], - reservedName: [], - }; -} - -export const DescriptorProto = { - encode(message: DescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - for (const v of message.field) { - FieldDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.extension) { - FieldDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); - } - for (const v of message.nestedType) { - DescriptorProto.encode(v!, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.enumType) { - EnumDescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); - } - for (const v of message.extensionRange) { - DescriptorProto_ExtensionRange.encode(v!, writer.uint32(42).fork()).ldelim(); - } - for (const v of message.oneofDecl) { - OneofDescriptorProto.encode(v!, writer.uint32(66).fork()).ldelim(); - } - if (message.options !== undefined) { - MessageOptions.encode(message.options, writer.uint32(58).fork()).ldelim(); - } - for (const v of message.reservedRange) { - DescriptorProto_ReservedRange.encode(v!, writer.uint32(74).fork()).ldelim(); - } - for (const v of message.reservedName) { - writer.uint32(82).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.field.push(FieldDescriptorProto.decode(reader, reader.uint32())); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.extension.push(FieldDescriptorProto.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.nestedType.push(DescriptorProto.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.enumType.push(EnumDescriptorProto.decode(reader, reader.uint32())); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.extensionRange.push(DescriptorProto_ExtensionRange.decode(reader, reader.uint32())); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.oneofDecl.push(OneofDescriptorProto.decode(reader, reader.uint32())); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.options = MessageOptions.decode(reader, reader.uint32()); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.reservedRange.push(DescriptorProto_ReservedRange.decode(reader, reader.uint32())); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.reservedName.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - field: Array.isArray(object?.field) ? object.field.map((e: any) => FieldDescriptorProto.fromJSON(e)) : [], - extension: Array.isArray(object?.extension) - ? 
object.extension.map((e: any) => FieldDescriptorProto.fromJSON(e)) - : [], - nestedType: Array.isArray(object?.nestedType) - ? object.nestedType.map((e: any) => DescriptorProto.fromJSON(e)) - : [], - enumType: Array.isArray(object?.enumType) ? object.enumType.map((e: any) => EnumDescriptorProto.fromJSON(e)) : [], - extensionRange: Array.isArray(object?.extensionRange) - ? object.extensionRange.map((e: any) => DescriptorProto_ExtensionRange.fromJSON(e)) - : [], - oneofDecl: Array.isArray(object?.oneofDecl) - ? object.oneofDecl.map((e: any) => OneofDescriptorProto.fromJSON(e)) - : [], - options: isSet(object.options) ? MessageOptions.fromJSON(object.options) : undefined, - reservedRange: Array.isArray(object?.reservedRange) - ? object.reservedRange.map((e: any) => DescriptorProto_ReservedRange.fromJSON(e)) - : [], - reservedName: Array.isArray(object?.reservedName) ? object.reservedName.map((e: any) => String(e)) : [], - }; - }, - - toJSON(message: DescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.field?.length) { - obj.field = message.field.map((e) => FieldDescriptorProto.toJSON(e)); - } - if (message.extension?.length) { - obj.extension = message.extension.map((e) => FieldDescriptorProto.toJSON(e)); - } - if (message.nestedType?.length) { - obj.nestedType = message.nestedType.map((e) => DescriptorProto.toJSON(e)); - } - if (message.enumType?.length) { - obj.enumType = message.enumType.map((e) => EnumDescriptorProto.toJSON(e)); - } - if (message.extensionRange?.length) { - obj.extensionRange = message.extensionRange.map((e) => DescriptorProto_ExtensionRange.toJSON(e)); - } - if (message.oneofDecl?.length) { - obj.oneofDecl = message.oneofDecl.map((e) => OneofDescriptorProto.toJSON(e)); - } - if (message.options !== undefined) { - obj.options = MessageOptions.toJSON(message.options); - } - if (message.reservedRange?.length) { - obj.reservedRange = message.reservedRange.map((e) => DescriptorProto_ReservedRange.toJSON(e)); - } - if (message.reservedName?.length) { - obj.reservedName = message.reservedName; - } - return obj; - }, - - create, I>>(base?: I): DescriptorProto { - return DescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): DescriptorProto { - const message = createBaseDescriptorProto(); - message.name = object.name ?? ""; - message.field = object.field?.map((e) => FieldDescriptorProto.fromPartial(e)) || []; - message.extension = object.extension?.map((e) => FieldDescriptorProto.fromPartial(e)) || []; - message.nestedType = object.nestedType?.map((e) => DescriptorProto.fromPartial(e)) || []; - message.enumType = object.enumType?.map((e) => EnumDescriptorProto.fromPartial(e)) || []; - message.extensionRange = object.extensionRange?.map((e) => DescriptorProto_ExtensionRange.fromPartial(e)) || []; - message.oneofDecl = object.oneofDecl?.map((e) => OneofDescriptorProto.fromPartial(e)) || []; - message.options = (object.options !== undefined && object.options !== null) - ? 
MessageOptions.fromPartial(object.options) - : undefined; - message.reservedRange = object.reservedRange?.map((e) => DescriptorProto_ReservedRange.fromPartial(e)) || []; - message.reservedName = object.reservedName?.map((e) => e) || []; - return message; - }, -}; - -function createBaseDescriptorProto_ExtensionRange(): DescriptorProto_ExtensionRange { - return { start: 0, end: 0, options: undefined }; -} - -export const DescriptorProto_ExtensionRange = { - encode(message: DescriptorProto_ExtensionRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== 0) { - writer.uint32(8).int32(message.start); - } - if (message.end !== 0) { - writer.uint32(16).int32(message.end); - } - if (message.options !== undefined) { - ExtensionRangeOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DescriptorProto_ExtensionRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDescriptorProto_ExtensionRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.start = reader.int32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.end = reader.int32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.options = ExtensionRangeOptions.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DescriptorProto_ExtensionRange { - return { - start: isSet(object.start) ? Number(object.start) : 0, - end: isSet(object.end) ? Number(object.end) : 0, - options: isSet(object.options) ? ExtensionRangeOptions.fromJSON(object.options) : undefined, - }; - }, - - toJSON(message: DescriptorProto_ExtensionRange): unknown { - const obj: any = {}; - if (message.start !== 0) { - obj.start = Math.round(message.start); - } - if (message.end !== 0) { - obj.end = Math.round(message.end); - } - if (message.options !== undefined) { - obj.options = ExtensionRangeOptions.toJSON(message.options); - } - return obj; - }, - - create, I>>(base?: I): DescriptorProto_ExtensionRange { - return DescriptorProto_ExtensionRange.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): DescriptorProto_ExtensionRange { - const message = createBaseDescriptorProto_ExtensionRange(); - message.start = object.start ?? 0; - message.end = object.end ?? 0; - message.options = (object.options !== undefined && object.options !== null) - ? ExtensionRangeOptions.fromPartial(object.options) - : undefined; - return message; - }, -}; - -function createBaseDescriptorProto_ReservedRange(): DescriptorProto_ReservedRange { - return { start: 0, end: 0 }; -} - -export const DescriptorProto_ReservedRange = { - encode(message: DescriptorProto_ReservedRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== 0) { - writer.uint32(8).int32(message.start); - } - if (message.end !== 0) { - writer.uint32(16).int32(message.end); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DescriptorProto_ReservedRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDescriptorProto_ReservedRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.start = reader.int32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.end = reader.int32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DescriptorProto_ReservedRange { - return { start: isSet(object.start) ? Number(object.start) : 0, end: isSet(object.end) ? Number(object.end) : 0 }; - }, - - toJSON(message: DescriptorProto_ReservedRange): unknown { - const obj: any = {}; - if (message.start !== 0) { - obj.start = Math.round(message.start); - } - if (message.end !== 0) { - obj.end = Math.round(message.end); - } - return obj; - }, - - create, I>>(base?: I): DescriptorProto_ReservedRange { - return DescriptorProto_ReservedRange.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): DescriptorProto_ReservedRange { - const message = createBaseDescriptorProto_ReservedRange(); - message.start = object.start ?? 0; - message.end = object.end ?? 0; - return message; - }, -}; - -function createBaseExtensionRangeOptions(): ExtensionRangeOptions { - return { uninterpretedOption: [] }; -} - -export const ExtensionRangeOptions = { - encode(message: ExtensionRangeOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ExtensionRangeOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseExtensionRangeOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ExtensionRangeOptions { - return { - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ExtensionRangeOptions): unknown { - const obj: any = {}; - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): ExtensionRangeOptions { - return ExtensionRangeOptions.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): ExtensionRangeOptions { - const message = createBaseExtensionRangeOptions(); - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseFieldDescriptorProto(): FieldDescriptorProto { - return { - name: "", - number: 0, - label: 1, - type: 1, - typeName: "", - extendee: "", - defaultValue: "", - oneofIndex: 0, - jsonName: "", - options: undefined, - proto3Optional: false, - }; -} - -export const FieldDescriptorProto = { - encode(message: FieldDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.number !== 0) { - writer.uint32(24).int32(message.number); - } - if (message.label !== 1) { - writer.uint32(32).int32(message.label); - } - if (message.type !== 1) { - writer.uint32(40).int32(message.type); - } - if (message.typeName !== "") { - writer.uint32(50).string(message.typeName); - } - if (message.extendee !== "") { - writer.uint32(18).string(message.extendee); - } - if (message.defaultValue !== "") { - writer.uint32(58).string(message.defaultValue); - } - if (message.oneofIndex !== 0) { - writer.uint32(72).int32(message.oneofIndex); - } - if (message.jsonName !== "") { - writer.uint32(82).string(message.jsonName); - } - if (message.options !== undefined) { - FieldOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); - } - if (message.proto3Optional === true) { - writer.uint32(136).bool(message.proto3Optional); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FieldDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseFieldDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.number = reader.int32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.label = reader.int32() as any; - continue; - case 5: - if (tag !== 40) { - break; - } - - message.type = reader.int32() as any; - continue; - case 6: - if (tag !== 50) { - break; - } - - message.typeName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.extendee = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.defaultValue = reader.string(); - continue; - case 9: - if (tag !== 72) { - break; - } - - message.oneofIndex = reader.int32(); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.jsonName = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.options = FieldOptions.decode(reader, reader.uint32()); - continue; - case 17: - if (tag !== 136) { - break; - } - - message.proto3Optional = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FieldDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - number: isSet(object.number) ? Number(object.number) : 0, - label: isSet(object.label) ? fieldDescriptorProto_LabelFromJSON(object.label) : 1, - type: isSet(object.type) ? 
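Note that `createBaseFieldDescriptorProto()` seeds `label` and `type` with 1 rather than 0: in descriptor.proto those enums start at 1 (LABEL_OPTIONAL, TYPE_DOUBLE), so the encoder and `toJSON` compare against `!== 1` when skipping defaults. A small sketch of the `fromPartial` round trip under those defaults (field values invented, import path assumed):

import { FieldDescriptorProto } from "./descriptor"; // assumed module path

// Only set the fields we care about; everything else falls back to the generated defaults.
const field = FieldDescriptorProto.fromPartial({
  name: "page_size",
  number: 2,
  // label and type are omitted, so fromPartial leaves them at the default 1.
});

console.log(field.label, field.type); // 1 1

// toJSON omits defaults, so label/type do not appear in the output at all.
console.log(FieldDescriptorProto.toJSON(field)); // { name: "page_size", number: 2 }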
fieldDescriptorProto_TypeFromJSON(object.type) : 1, - typeName: isSet(object.typeName) ? String(object.typeName) : "", - extendee: isSet(object.extendee) ? String(object.extendee) : "", - defaultValue: isSet(object.defaultValue) ? String(object.defaultValue) : "", - oneofIndex: isSet(object.oneofIndex) ? Number(object.oneofIndex) : 0, - jsonName: isSet(object.jsonName) ? String(object.jsonName) : "", - options: isSet(object.options) ? FieldOptions.fromJSON(object.options) : undefined, - proto3Optional: isSet(object.proto3Optional) ? Boolean(object.proto3Optional) : false, - }; - }, - - toJSON(message: FieldDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.number !== 0) { - obj.number = Math.round(message.number); - } - if (message.label !== 1) { - obj.label = fieldDescriptorProto_LabelToJSON(message.label); - } - if (message.type !== 1) { - obj.type = fieldDescriptorProto_TypeToJSON(message.type); - } - if (message.typeName !== "") { - obj.typeName = message.typeName; - } - if (message.extendee !== "") { - obj.extendee = message.extendee; - } - if (message.defaultValue !== "") { - obj.defaultValue = message.defaultValue; - } - if (message.oneofIndex !== 0) { - obj.oneofIndex = Math.round(message.oneofIndex); - } - if (message.jsonName !== "") { - obj.jsonName = message.jsonName; - } - if (message.options !== undefined) { - obj.options = FieldOptions.toJSON(message.options); - } - if (message.proto3Optional === true) { - obj.proto3Optional = message.proto3Optional; - } - return obj; - }, - - create, I>>(base?: I): FieldDescriptorProto { - return FieldDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): FieldDescriptorProto { - const message = createBaseFieldDescriptorProto(); - message.name = object.name ?? ""; - message.number = object.number ?? 0; - message.label = object.label ?? 1; - message.type = object.type ?? 1; - message.typeName = object.typeName ?? ""; - message.extendee = object.extendee ?? ""; - message.defaultValue = object.defaultValue ?? ""; - message.oneofIndex = object.oneofIndex ?? 0; - message.jsonName = object.jsonName ?? ""; - message.options = (object.options !== undefined && object.options !== null) - ? FieldOptions.fromPartial(object.options) - : undefined; - message.proto3Optional = object.proto3Optional ?? false; - return message; - }, -}; - -function createBaseOneofDescriptorProto(): OneofDescriptorProto { - return { name: "", options: undefined }; -} - -export const OneofDescriptorProto = { - encode(message: OneofDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.options !== undefined) { - OneofOptions.encode(message.options, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OneofDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseOneofDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.options = OneofOptions.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OneofDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - options: isSet(object.options) ? OneofOptions.fromJSON(object.options) : undefined, - }; - }, - - toJSON(message: OneofDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.options !== undefined) { - obj.options = OneofOptions.toJSON(message.options); - } - return obj; - }, - - create, I>>(base?: I): OneofDescriptorProto { - return OneofDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): OneofDescriptorProto { - const message = createBaseOneofDescriptorProto(); - message.name = object.name ?? ""; - message.options = (object.options !== undefined && object.options !== null) - ? OneofOptions.fromPartial(object.options) - : undefined; - return message; - }, -}; - -function createBaseEnumDescriptorProto(): EnumDescriptorProto { - return { name: "", value: [], options: undefined, reservedRange: [], reservedName: [] }; -} - -export const EnumDescriptorProto = { - encode(message: EnumDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - for (const v of message.value) { - EnumValueDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.options !== undefined) { - EnumOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.reservedRange) { - EnumDescriptorProto_EnumReservedRange.encode(v!, writer.uint32(34).fork()).ldelim(); - } - for (const v of message.reservedName) { - writer.uint32(42).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnumDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnumDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value.push(EnumValueDescriptorProto.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.options = EnumOptions.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.reservedRange.push(EnumDescriptorProto_EnumReservedRange.decode(reader, reader.uint32())); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.reservedName.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnumDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - value: Array.isArray(object?.value) ? 
object.value.map((e: any) => EnumValueDescriptorProto.fromJSON(e)) : [], - options: isSet(object.options) ? EnumOptions.fromJSON(object.options) : undefined, - reservedRange: Array.isArray(object?.reservedRange) - ? object.reservedRange.map((e: any) => EnumDescriptorProto_EnumReservedRange.fromJSON(e)) - : [], - reservedName: Array.isArray(object?.reservedName) ? object.reservedName.map((e: any) => String(e)) : [], - }; - }, - - toJSON(message: EnumDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.value?.length) { - obj.value = message.value.map((e) => EnumValueDescriptorProto.toJSON(e)); - } - if (message.options !== undefined) { - obj.options = EnumOptions.toJSON(message.options); - } - if (message.reservedRange?.length) { - obj.reservedRange = message.reservedRange.map((e) => EnumDescriptorProto_EnumReservedRange.toJSON(e)); - } - if (message.reservedName?.length) { - obj.reservedName = message.reservedName; - } - return obj; - }, - - create, I>>(base?: I): EnumDescriptorProto { - return EnumDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EnumDescriptorProto { - const message = createBaseEnumDescriptorProto(); - message.name = object.name ?? ""; - message.value = object.value?.map((e) => EnumValueDescriptorProto.fromPartial(e)) || []; - message.options = (object.options !== undefined && object.options !== null) - ? EnumOptions.fromPartial(object.options) - : undefined; - message.reservedRange = object.reservedRange?.map((e) => EnumDescriptorProto_EnumReservedRange.fromPartial(e)) || - []; - message.reservedName = object.reservedName?.map((e) => e) || []; - return message; - }, -}; - -function createBaseEnumDescriptorProto_EnumReservedRange(): EnumDescriptorProto_EnumReservedRange { - return { start: 0, end: 0 }; -} - -export const EnumDescriptorProto_EnumReservedRange = { - encode(message: EnumDescriptorProto_EnumReservedRange, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.start !== 0) { - writer.uint32(8).int32(message.start); - } - if (message.end !== 0) { - writer.uint32(16).int32(message.end); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnumDescriptorProto_EnumReservedRange { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnumDescriptorProto_EnumReservedRange(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.start = reader.int32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.end = reader.int32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnumDescriptorProto_EnumReservedRange { - return { start: isSet(object.start) ? Number(object.start) : 0, end: isSet(object.end) ? Number(object.end) : 0 }; - }, - - toJSON(message: EnumDescriptorProto_EnumReservedRange): unknown { - const obj: any = {}; - if (message.start !== 0) { - obj.start = Math.round(message.start); - } - if (message.end !== 0) { - obj.end = Math.round(message.end); - } - return obj; - }, - - create, I>>( - base?: I, - ): EnumDescriptorProto_EnumReservedRange { - return EnumDescriptorProto_EnumReservedRange.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>( - object: I, - ): EnumDescriptorProto_EnumReservedRange { - const message = createBaseEnumDescriptorProto_EnumReservedRange(); - message.start = object.start ?? 0; - message.end = object.end ?? 0; - return message; - }, -}; - -function createBaseEnumValueDescriptorProto(): EnumValueDescriptorProto { - return { name: "", number: 0, options: undefined }; -} - -export const EnumValueDescriptorProto = { - encode(message: EnumValueDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.number !== 0) { - writer.uint32(16).int32(message.number); - } - if (message.options !== undefined) { - EnumValueOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnumValueDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnumValueDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.number = reader.int32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.options = EnumValueOptions.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnumValueDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - number: isSet(object.number) ? Number(object.number) : 0, - options: isSet(object.options) ? EnumValueOptions.fromJSON(object.options) : undefined, - }; - }, - - toJSON(message: EnumValueDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.number !== 0) { - obj.number = Math.round(message.number); - } - if (message.options !== undefined) { - obj.options = EnumValueOptions.toJSON(message.options); - } - return obj; - }, - - create, I>>(base?: I): EnumValueDescriptorProto { - return EnumValueDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EnumValueDescriptorProto { - const message = createBaseEnumValueDescriptorProto(); - message.name = object.name ?? ""; - message.number = object.number ?? 0; - message.options = (object.options !== undefined && object.options !== null) - ? EnumValueOptions.fromPartial(object.options) - : undefined; - return message; - }, -}; - -function createBaseServiceDescriptorProto(): ServiceDescriptorProto { - return { name: "", method: [], options: undefined }; -} - -export const ServiceDescriptorProto = { - encode(message: ServiceDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - for (const v of message.method) { - MethodDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.options !== undefined) { - ServiceOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseServiceDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.method.push(MethodDescriptorProto.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.options = ServiceOptions.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - method: Array.isArray(object?.method) ? object.method.map((e: any) => MethodDescriptorProto.fromJSON(e)) : [], - options: isSet(object.options) ? ServiceOptions.fromJSON(object.options) : undefined, - }; - }, - - toJSON(message: ServiceDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.method?.length) { - obj.method = message.method.map((e) => MethodDescriptorProto.toJSON(e)); - } - if (message.options !== undefined) { - obj.options = ServiceOptions.toJSON(message.options); - } - return obj; - }, - - create, I>>(base?: I): ServiceDescriptorProto { - return ServiceDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ServiceDescriptorProto { - const message = createBaseServiceDescriptorProto(); - message.name = object.name ?? ""; - message.method = object.method?.map((e) => MethodDescriptorProto.fromPartial(e)) || []; - message.options = (object.options !== undefined && object.options !== null) - ? ServiceOptions.fromPartial(object.options) - : undefined; - return message; - }, -}; - -function createBaseMethodDescriptorProto(): MethodDescriptorProto { - return { - name: "", - inputType: "", - outputType: "", - options: undefined, - clientStreaming: false, - serverStreaming: false, - }; -} - -export const MethodDescriptorProto = { - encode(message: MethodDescriptorProto, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.inputType !== "") { - writer.uint32(18).string(message.inputType); - } - if (message.outputType !== "") { - writer.uint32(26).string(message.outputType); - } - if (message.options !== undefined) { - MethodOptions.encode(message.options, writer.uint32(34).fork()).ldelim(); - } - if (message.clientStreaming === true) { - writer.uint32(40).bool(message.clientStreaming); - } - if (message.serverStreaming === true) { - writer.uint32(48).bool(message.serverStreaming); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MethodDescriptorProto { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMethodDescriptorProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.inputType = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.outputType = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.options = MethodOptions.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.clientStreaming = reader.bool(); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.serverStreaming = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MethodDescriptorProto { - return { - name: isSet(object.name) ? String(object.name) : "", - inputType: isSet(object.inputType) ? String(object.inputType) : "", - outputType: isSet(object.outputType) ? String(object.outputType) : "", - options: isSet(object.options) ? MethodOptions.fromJSON(object.options) : undefined, - clientStreaming: isSet(object.clientStreaming) ? Boolean(object.clientStreaming) : false, - serverStreaming: isSet(object.serverStreaming) ? Boolean(object.serverStreaming) : false, - }; - }, - - toJSON(message: MethodDescriptorProto): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.inputType !== "") { - obj.inputType = message.inputType; - } - if (message.outputType !== "") { - obj.outputType = message.outputType; - } - if (message.options !== undefined) { - obj.options = MethodOptions.toJSON(message.options); - } - if (message.clientStreaming === true) { - obj.clientStreaming = message.clientStreaming; - } - if (message.serverStreaming === true) { - obj.serverStreaming = message.serverStreaming; - } - return obj; - }, - - create, I>>(base?: I): MethodDescriptorProto { - return MethodDescriptorProto.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): MethodDescriptorProto { - const message = createBaseMethodDescriptorProto(); - message.name = object.name ?? ""; - message.inputType = object.inputType ?? ""; - message.outputType = object.outputType ?? ""; - message.options = (object.options !== undefined && object.options !== null) - ? MethodOptions.fromPartial(object.options) - : undefined; - message.clientStreaming = object.clientStreaming ?? false; - message.serverStreaming = object.serverStreaming ?? 
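For orientation, this is roughly what a `MethodDescriptorProto` for a server-streaming RPC looks like when built and round-tripped through the deleted helpers (the method and type names are invented; only the field names come from the generated code):

import { MethodDescriptorProto } from "./descriptor"; // assumed module path

const method = MethodDescriptorProto.fromPartial({
  name: "SubscribeEvents",                // hypothetical RPC name
  inputType: ".example.SubscribeRequest", // fully-qualified, leading-dot type names
  outputType: ".example.Event",
  serverStreaming: true,                  // clientStreaming stays at the default false
});

// encode/decode round trip through the protobuf wire format.
const bytes = MethodDescriptorProto.encode(method).finish();
const decoded = MethodDescriptorProto.decode(bytes);
console.log(decoded.serverStreaming, decoded.clientStreaming); // true false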
false; - return message; - }, -}; - -function createBaseFileOptions(): FileOptions { - return { - javaPackage: "", - javaOuterClassname: "", - javaMultipleFiles: false, - javaGenerateEqualsAndHash: false, - javaStringCheckUtf8: false, - optimizeFor: 1, - goPackage: "", - ccGenericServices: false, - javaGenericServices: false, - pyGenericServices: false, - phpGenericServices: false, - deprecated: false, - ccEnableArenas: false, - objcClassPrefix: "", - csharpNamespace: "", - swiftPrefix: "", - phpClassPrefix: "", - phpNamespace: "", - phpMetadataNamespace: "", - rubyPackage: "", - uninterpretedOption: [], - }; -} - -export const FileOptions = { - encode(message: FileOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.javaPackage !== "") { - writer.uint32(10).string(message.javaPackage); - } - if (message.javaOuterClassname !== "") { - writer.uint32(66).string(message.javaOuterClassname); - } - if (message.javaMultipleFiles === true) { - writer.uint32(80).bool(message.javaMultipleFiles); - } - if (message.javaGenerateEqualsAndHash === true) { - writer.uint32(160).bool(message.javaGenerateEqualsAndHash); - } - if (message.javaStringCheckUtf8 === true) { - writer.uint32(216).bool(message.javaStringCheckUtf8); - } - if (message.optimizeFor !== 1) { - writer.uint32(72).int32(message.optimizeFor); - } - if (message.goPackage !== "") { - writer.uint32(90).string(message.goPackage); - } - if (message.ccGenericServices === true) { - writer.uint32(128).bool(message.ccGenericServices); - } - if (message.javaGenericServices === true) { - writer.uint32(136).bool(message.javaGenericServices); - } - if (message.pyGenericServices === true) { - writer.uint32(144).bool(message.pyGenericServices); - } - if (message.phpGenericServices === true) { - writer.uint32(336).bool(message.phpGenericServices); - } - if (message.deprecated === true) { - writer.uint32(184).bool(message.deprecated); - } - if (message.ccEnableArenas === true) { - writer.uint32(248).bool(message.ccEnableArenas); - } - if (message.objcClassPrefix !== "") { - writer.uint32(290).string(message.objcClassPrefix); - } - if (message.csharpNamespace !== "") { - writer.uint32(298).string(message.csharpNamespace); - } - if (message.swiftPrefix !== "") { - writer.uint32(314).string(message.swiftPrefix); - } - if (message.phpClassPrefix !== "") { - writer.uint32(322).string(message.phpClassPrefix); - } - if (message.phpNamespace !== "") { - writer.uint32(330).string(message.phpNamespace); - } - if (message.phpMetadataNamespace !== "") { - writer.uint32(354).string(message.phpMetadataNamespace); - } - if (message.rubyPackage !== "") { - writer.uint32(362).string(message.rubyPackage); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FileOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseFileOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.javaPackage = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.javaOuterClassname = reader.string(); - continue; - case 10: - if (tag !== 80) { - break; - } - - message.javaMultipleFiles = reader.bool(); - continue; - case 20: - if (tag !== 160) { - break; - } - - message.javaGenerateEqualsAndHash = reader.bool(); - continue; - case 27: - if (tag !== 216) { - break; - } - - message.javaStringCheckUtf8 = reader.bool(); - continue; - case 9: - if (tag !== 72) { - break; - } - - message.optimizeFor = reader.int32() as any; - continue; - case 11: - if (tag !== 90) { - break; - } - - message.goPackage = reader.string(); - continue; - case 16: - if (tag !== 128) { - break; - } - - message.ccGenericServices = reader.bool(); - continue; - case 17: - if (tag !== 136) { - break; - } - - message.javaGenericServices = reader.bool(); - continue; - case 18: - if (tag !== 144) { - break; - } - - message.pyGenericServices = reader.bool(); - continue; - case 42: - if (tag !== 336) { - break; - } - - message.phpGenericServices = reader.bool(); - continue; - case 23: - if (tag !== 184) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 31: - if (tag !== 248) { - break; - } - - message.ccEnableArenas = reader.bool(); - continue; - case 36: - if (tag !== 290) { - break; - } - - message.objcClassPrefix = reader.string(); - continue; - case 37: - if (tag !== 298) { - break; - } - - message.csharpNamespace = reader.string(); - continue; - case 39: - if (tag !== 314) { - break; - } - - message.swiftPrefix = reader.string(); - continue; - case 40: - if (tag !== 322) { - break; - } - - message.phpClassPrefix = reader.string(); - continue; - case 41: - if (tag !== 330) { - break; - } - - message.phpNamespace = reader.string(); - continue; - case 44: - if (tag !== 354) { - break; - } - - message.phpMetadataNamespace = reader.string(); - continue; - case 45: - if (tag !== 362) { - break; - } - - message.rubyPackage = reader.string(); - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FileOptions { - return { - javaPackage: isSet(object.javaPackage) ? String(object.javaPackage) : "", - javaOuterClassname: isSet(object.javaOuterClassname) ? String(object.javaOuterClassname) : "", - javaMultipleFiles: isSet(object.javaMultipleFiles) ? Boolean(object.javaMultipleFiles) : false, - javaGenerateEqualsAndHash: isSet(object.javaGenerateEqualsAndHash) - ? Boolean(object.javaGenerateEqualsAndHash) - : false, - javaStringCheckUtf8: isSet(object.javaStringCheckUtf8) ? Boolean(object.javaStringCheckUtf8) : false, - optimizeFor: isSet(object.optimizeFor) ? fileOptions_OptimizeModeFromJSON(object.optimizeFor) : 1, - goPackage: isSet(object.goPackage) ? String(object.goPackage) : "", - ccGenericServices: isSet(object.ccGenericServices) ? Boolean(object.ccGenericServices) : false, - javaGenericServices: isSet(object.javaGenericServices) ? Boolean(object.javaGenericServices) : false, - pyGenericServices: isSet(object.pyGenericServices) ? 
Boolean(object.pyGenericServices) : false, - phpGenericServices: isSet(object.phpGenericServices) ? Boolean(object.phpGenericServices) : false, - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - ccEnableArenas: isSet(object.ccEnableArenas) ? Boolean(object.ccEnableArenas) : false, - objcClassPrefix: isSet(object.objcClassPrefix) ? String(object.objcClassPrefix) : "", - csharpNamespace: isSet(object.csharpNamespace) ? String(object.csharpNamespace) : "", - swiftPrefix: isSet(object.swiftPrefix) ? String(object.swiftPrefix) : "", - phpClassPrefix: isSet(object.phpClassPrefix) ? String(object.phpClassPrefix) : "", - phpNamespace: isSet(object.phpNamespace) ? String(object.phpNamespace) : "", - phpMetadataNamespace: isSet(object.phpMetadataNamespace) ? String(object.phpMetadataNamespace) : "", - rubyPackage: isSet(object.rubyPackage) ? String(object.rubyPackage) : "", - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: FileOptions): unknown { - const obj: any = {}; - if (message.javaPackage !== "") { - obj.javaPackage = message.javaPackage; - } - if (message.javaOuterClassname !== "") { - obj.javaOuterClassname = message.javaOuterClassname; - } - if (message.javaMultipleFiles === true) { - obj.javaMultipleFiles = message.javaMultipleFiles; - } - if (message.javaGenerateEqualsAndHash === true) { - obj.javaGenerateEqualsAndHash = message.javaGenerateEqualsAndHash; - } - if (message.javaStringCheckUtf8 === true) { - obj.javaStringCheckUtf8 = message.javaStringCheckUtf8; - } - if (message.optimizeFor !== 1) { - obj.optimizeFor = fileOptions_OptimizeModeToJSON(message.optimizeFor); - } - if (message.goPackage !== "") { - obj.goPackage = message.goPackage; - } - if (message.ccGenericServices === true) { - obj.ccGenericServices = message.ccGenericServices; - } - if (message.javaGenericServices === true) { - obj.javaGenericServices = message.javaGenericServices; - } - if (message.pyGenericServices === true) { - obj.pyGenericServices = message.pyGenericServices; - } - if (message.phpGenericServices === true) { - obj.phpGenericServices = message.phpGenericServices; - } - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.ccEnableArenas === true) { - obj.ccEnableArenas = message.ccEnableArenas; - } - if (message.objcClassPrefix !== "") { - obj.objcClassPrefix = message.objcClassPrefix; - } - if (message.csharpNamespace !== "") { - obj.csharpNamespace = message.csharpNamespace; - } - if (message.swiftPrefix !== "") { - obj.swiftPrefix = message.swiftPrefix; - } - if (message.phpClassPrefix !== "") { - obj.phpClassPrefix = message.phpClassPrefix; - } - if (message.phpNamespace !== "") { - obj.phpNamespace = message.phpNamespace; - } - if (message.phpMetadataNamespace !== "") { - obj.phpMetadataNamespace = message.phpMetadataNamespace; - } - if (message.rubyPackage !== "") { - obj.rubyPackage = message.rubyPackage; - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): FileOptions { - return FileOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): FileOptions { - const message = createBaseFileOptions(); - message.javaPackage = object.javaPackage ?? ""; - message.javaOuterClassname = object.javaOuterClassname ?? 
""; - message.javaMultipleFiles = object.javaMultipleFiles ?? false; - message.javaGenerateEqualsAndHash = object.javaGenerateEqualsAndHash ?? false; - message.javaStringCheckUtf8 = object.javaStringCheckUtf8 ?? false; - message.optimizeFor = object.optimizeFor ?? 1; - message.goPackage = object.goPackage ?? ""; - message.ccGenericServices = object.ccGenericServices ?? false; - message.javaGenericServices = object.javaGenericServices ?? false; - message.pyGenericServices = object.pyGenericServices ?? false; - message.phpGenericServices = object.phpGenericServices ?? false; - message.deprecated = object.deprecated ?? false; - message.ccEnableArenas = object.ccEnableArenas ?? false; - message.objcClassPrefix = object.objcClassPrefix ?? ""; - message.csharpNamespace = object.csharpNamespace ?? ""; - message.swiftPrefix = object.swiftPrefix ?? ""; - message.phpClassPrefix = object.phpClassPrefix ?? ""; - message.phpNamespace = object.phpNamespace ?? ""; - message.phpMetadataNamespace = object.phpMetadataNamespace ?? ""; - message.rubyPackage = object.rubyPackage ?? ""; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseMessageOptions(): MessageOptions { - return { - messageSetWireFormat: false, - noStandardDescriptorAccessor: false, - deprecated: false, - mapEntry: false, - deprecatedLegacyJsonFieldConflicts: false, - uninterpretedOption: [], - }; -} - -export const MessageOptions = { - encode(message: MessageOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.messageSetWireFormat === true) { - writer.uint32(8).bool(message.messageSetWireFormat); - } - if (message.noStandardDescriptorAccessor === true) { - writer.uint32(16).bool(message.noStandardDescriptorAccessor); - } - if (message.deprecated === true) { - writer.uint32(24).bool(message.deprecated); - } - if (message.mapEntry === true) { - writer.uint32(56).bool(message.mapEntry); - } - if (message.deprecatedLegacyJsonFieldConflicts === true) { - writer.uint32(88).bool(message.deprecatedLegacyJsonFieldConflicts); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MessageOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMessageOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.messageSetWireFormat = reader.bool(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.noStandardDescriptorAccessor = reader.bool(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.mapEntry = reader.bool(); - continue; - case 11: - if (tag !== 88) { - break; - } - - message.deprecatedLegacyJsonFieldConflicts = reader.bool(); - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MessageOptions { - return { - messageSetWireFormat: isSet(object.messageSetWireFormat) ? 
Boolean(object.messageSetWireFormat) : false, - noStandardDescriptorAccessor: isSet(object.noStandardDescriptorAccessor) - ? Boolean(object.noStandardDescriptorAccessor) - : false, - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - mapEntry: isSet(object.mapEntry) ? Boolean(object.mapEntry) : false, - deprecatedLegacyJsonFieldConflicts: isSet(object.deprecatedLegacyJsonFieldConflicts) - ? Boolean(object.deprecatedLegacyJsonFieldConflicts) - : false, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MessageOptions): unknown { - const obj: any = {}; - if (message.messageSetWireFormat === true) { - obj.messageSetWireFormat = message.messageSetWireFormat; - } - if (message.noStandardDescriptorAccessor === true) { - obj.noStandardDescriptorAccessor = message.noStandardDescriptorAccessor; - } - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.mapEntry === true) { - obj.mapEntry = message.mapEntry; - } - if (message.deprecatedLegacyJsonFieldConflicts === true) { - obj.deprecatedLegacyJsonFieldConflicts = message.deprecatedLegacyJsonFieldConflicts; - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): MessageOptions { - return MessageOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): MessageOptions { - const message = createBaseMessageOptions(); - message.messageSetWireFormat = object.messageSetWireFormat ?? false; - message.noStandardDescriptorAccessor = object.noStandardDescriptorAccessor ?? false; - message.deprecated = object.deprecated ?? false; - message.mapEntry = object.mapEntry ?? false; - message.deprecatedLegacyJsonFieldConflicts = object.deprecatedLegacyJsonFieldConflicts ?? 
false; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseFieldOptions(): FieldOptions { - return { - ctype: 0, - packed: false, - jstype: 0, - lazy: false, - unverifiedLazy: false, - deprecated: false, - weak: false, - debugRedact: false, - retention: 0, - target: 0, - uninterpretedOption: [], - }; -} - -export const FieldOptions = { - encode(message: FieldOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.ctype !== 0) { - writer.uint32(8).int32(message.ctype); - } - if (message.packed === true) { - writer.uint32(16).bool(message.packed); - } - if (message.jstype !== 0) { - writer.uint32(48).int32(message.jstype); - } - if (message.lazy === true) { - writer.uint32(40).bool(message.lazy); - } - if (message.unverifiedLazy === true) { - writer.uint32(120).bool(message.unverifiedLazy); - } - if (message.deprecated === true) { - writer.uint32(24).bool(message.deprecated); - } - if (message.weak === true) { - writer.uint32(80).bool(message.weak); - } - if (message.debugRedact === true) { - writer.uint32(128).bool(message.debugRedact); - } - if (message.retention !== 0) { - writer.uint32(136).int32(message.retention); - } - if (message.target !== 0) { - writer.uint32(144).int32(message.target); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FieldOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseFieldOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.ctype = reader.int32() as any; - continue; - case 2: - if (tag !== 16) { - break; - } - - message.packed = reader.bool(); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.jstype = reader.int32() as any; - continue; - case 5: - if (tag !== 40) { - break; - } - - message.lazy = reader.bool(); - continue; - case 15: - if (tag !== 120) { - break; - } - - message.unverifiedLazy = reader.bool(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 10: - if (tag !== 80) { - break; - } - - message.weak = reader.bool(); - continue; - case 16: - if (tag !== 128) { - break; - } - - message.debugRedact = reader.bool(); - continue; - case 17: - if (tag !== 136) { - break; - } - - message.retention = reader.int32() as any; - continue; - case 18: - if (tag !== 144) { - break; - } - - message.target = reader.int32() as any; - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FieldOptions { - return { - ctype: isSet(object.ctype) ? fieldOptions_CTypeFromJSON(object.ctype) : 0, - packed: isSet(object.packed) ? Boolean(object.packed) : false, - jstype: isSet(object.jstype) ? fieldOptions_JSTypeFromJSON(object.jstype) : 0, - lazy: isSet(object.lazy) ? Boolean(object.lazy) : false, - unverifiedLazy: isSet(object.unverifiedLazy) ? Boolean(object.unverifiedLazy) : false, - deprecated: isSet(object.deprecated) ? 
Boolean(object.deprecated) : false, - weak: isSet(object.weak) ? Boolean(object.weak) : false, - debugRedact: isSet(object.debugRedact) ? Boolean(object.debugRedact) : false, - retention: isSet(object.retention) ? fieldOptions_OptionRetentionFromJSON(object.retention) : 0, - target: isSet(object.target) ? fieldOptions_OptionTargetTypeFromJSON(object.target) : 0, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: FieldOptions): unknown { - const obj: any = {}; - if (message.ctype !== 0) { - obj.ctype = fieldOptions_CTypeToJSON(message.ctype); - } - if (message.packed === true) { - obj.packed = message.packed; - } - if (message.jstype !== 0) { - obj.jstype = fieldOptions_JSTypeToJSON(message.jstype); - } - if (message.lazy === true) { - obj.lazy = message.lazy; - } - if (message.unverifiedLazy === true) { - obj.unverifiedLazy = message.unverifiedLazy; - } - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.weak === true) { - obj.weak = message.weak; - } - if (message.debugRedact === true) { - obj.debugRedact = message.debugRedact; - } - if (message.retention !== 0) { - obj.retention = fieldOptions_OptionRetentionToJSON(message.retention); - } - if (message.target !== 0) { - obj.target = fieldOptions_OptionTargetTypeToJSON(message.target); - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): FieldOptions { - return FieldOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): FieldOptions { - const message = createBaseFieldOptions(); - message.ctype = object.ctype ?? 0; - message.packed = object.packed ?? false; - message.jstype = object.jstype ?? 0; - message.lazy = object.lazy ?? false; - message.unverifiedLazy = object.unverifiedLazy ?? false; - message.deprecated = object.deprecated ?? false; - message.weak = object.weak ?? false; - message.debugRedact = object.debugRedact ?? false; - message.retention = object.retention ?? 0; - message.target = object.target ?? 0; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseOneofOptions(): OneofOptions { - return { uninterpretedOption: [] }; -} - -export const OneofOptions = { - encode(message: OneofOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OneofOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOneofOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OneofOptions { - return { - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? 
object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: OneofOptions): unknown { - const obj: any = {}; - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): OneofOptions { - return OneofOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): OneofOptions { - const message = createBaseOneofOptions(); - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseEnumOptions(): EnumOptions { - return { allowAlias: false, deprecated: false, deprecatedLegacyJsonFieldConflicts: false, uninterpretedOption: [] }; -} - -export const EnumOptions = { - encode(message: EnumOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.allowAlias === true) { - writer.uint32(16).bool(message.allowAlias); - } - if (message.deprecated === true) { - writer.uint32(24).bool(message.deprecated); - } - if (message.deprecatedLegacyJsonFieldConflicts === true) { - writer.uint32(48).bool(message.deprecatedLegacyJsonFieldConflicts); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnumOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnumOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - if (tag !== 16) { - break; - } - - message.allowAlias = reader.bool(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.deprecatedLegacyJsonFieldConflicts = reader.bool(); - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnumOptions { - return { - allowAlias: isSet(object.allowAlias) ? Boolean(object.allowAlias) : false, - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - deprecatedLegacyJsonFieldConflicts: isSet(object.deprecatedLegacyJsonFieldConflicts) - ? Boolean(object.deprecatedLegacyJsonFieldConflicts) - : false, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: EnumOptions): unknown { - const obj: any = {}; - if (message.allowAlias === true) { - obj.allowAlias = message.allowAlias; - } - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.deprecatedLegacyJsonFieldConflicts === true) { - obj.deprecatedLegacyJsonFieldConflicts = message.deprecatedLegacyJsonFieldConflicts; - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): EnumOptions { - return EnumOptions.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): EnumOptions { - const message = createBaseEnumOptions(); - message.allowAlias = object.allowAlias ?? false; - message.deprecated = object.deprecated ?? false; - message.deprecatedLegacyJsonFieldConflicts = object.deprecatedLegacyJsonFieldConflicts ?? false; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseEnumValueOptions(): EnumValueOptions { - return { deprecated: false, uninterpretedOption: [] }; -} - -export const EnumValueOptions = { - encode(message: EnumValueOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.deprecated === true) { - writer.uint32(8).bool(message.deprecated); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EnumValueOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEnumValueOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EnumValueOptions { - return { - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: EnumValueOptions): unknown { - const obj: any = {}; - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): EnumValueOptions { - return EnumValueOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EnumValueOptions { - const message = createBaseEnumValueOptions(); - message.deprecated = object.deprecated ?? false; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseServiceOptions(): ServiceOptions { - return { deprecated: false, uninterpretedOption: [] }; -} - -export const ServiceOptions = { - encode(message: ServiceOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.deprecated === true) { - writer.uint32(264).bool(message.deprecated); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseServiceOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 33: - if (tag !== 264) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceOptions { - return { - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ServiceOptions): unknown { - const obj: any = {}; - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): ServiceOptions { - return ServiceOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ServiceOptions { - const message = createBaseServiceOptions(); - message.deprecated = object.deprecated ?? false; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseMethodOptions(): MethodOptions { - return { deprecated: false, idempotencyLevel: 0, uninterpretedOption: [] }; -} - -export const MethodOptions = { - encode(message: MethodOptions, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.deprecated === true) { - writer.uint32(264).bool(message.deprecated); - } - if (message.idempotencyLevel !== 0) { - writer.uint32(272).int32(message.idempotencyLevel); - } - for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MethodOptions { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMethodOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 33: - if (tag !== 264) { - break; - } - - message.deprecated = reader.bool(); - continue; - case 34: - if (tag !== 272) { - break; - } - - message.idempotencyLevel = reader.int32() as any; - continue; - case 999: - if (tag !== 7994) { - break; - } - - message.uninterpretedOption.push(UninterpretedOption.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MethodOptions { - return { - deprecated: isSet(object.deprecated) ? Boolean(object.deprecated) : false, - idempotencyLevel: isSet(object.idempotencyLevel) - ? methodOptions_IdempotencyLevelFromJSON(object.idempotencyLevel) - : 0, - uninterpretedOption: Array.isArray(object?.uninterpretedOption) - ? 
object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MethodOptions): unknown { - const obj: any = {}; - if (message.deprecated === true) { - obj.deprecated = message.deprecated; - } - if (message.idempotencyLevel !== 0) { - obj.idempotencyLevel = methodOptions_IdempotencyLevelToJSON(message.idempotencyLevel); - } - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): MethodOptions { - return MethodOptions.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): MethodOptions { - const message = createBaseMethodOptions(); - message.deprecated = object.deprecated ?? false; - message.idempotencyLevel = object.idempotencyLevel ?? 0; - message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseUninterpretedOption(): UninterpretedOption { - return { - name: [], - identifierValue: "", - positiveIntValue: 0, - negativeIntValue: 0, - doubleValue: 0, - stringValue: new Uint8Array(0), - aggregateValue: "", - }; -} - -export const UninterpretedOption = { - encode(message: UninterpretedOption, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.name) { - UninterpretedOption_NamePart.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.identifierValue !== "") { - writer.uint32(26).string(message.identifierValue); - } - if (message.positiveIntValue !== 0) { - writer.uint32(32).uint64(message.positiveIntValue); - } - if (message.negativeIntValue !== 0) { - writer.uint32(40).int64(message.negativeIntValue); - } - if (message.doubleValue !== 0) { - writer.uint32(49).double(message.doubleValue); - } - if (message.stringValue.length !== 0) { - writer.uint32(58).bytes(message.stringValue); - } - if (message.aggregateValue !== "") { - writer.uint32(66).string(message.aggregateValue); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): UninterpretedOption { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseUninterpretedOption(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - if (tag !== 18) { - break; - } - - message.name.push(UninterpretedOption_NamePart.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.identifierValue = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.positiveIntValue = longToNumber(reader.uint64() as Long); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.negativeIntValue = longToNumber(reader.int64() as Long); - continue; - case 6: - if (tag !== 49) { - break; - } - - message.doubleValue = reader.double(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.stringValue = reader.bytes(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.aggregateValue = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): UninterpretedOption { - return { - name: Array.isArray(object?.name) ? 
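The positiveIntValue and negativeIntValue fields above are decoded through longToNumber, which (defined further down in this file) throws once a 64-bit value exceeds Number.MAX_SAFE_INTEGER. The reason is that plain JS numbers lose integer precision past 2^53 - 1. A tiny illustration, assuming the same long package the file imports; the variable names are illustrative.

import Long from "long";

// Past Number.MAX_SAFE_INTEGER (2^53 - 1) adjacent integers collapse, so the
// generated longToNumber guard refuses to convert rather than silently lose data.
const a = 2 ** 53;                                     // one past MAX_SAFE_INTEGER
console.log(a + 1 === a);                              // true -- precision already gone
const big = Long.fromString("9007199254740993", true); // unsigned 64-bit value
console.log(big.gt(Number.MAX_SAFE_INTEGER));          // true -> the guard would throw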
object.name.map((e: any) => UninterpretedOption_NamePart.fromJSON(e)) : [], - identifierValue: isSet(object.identifierValue) ? String(object.identifierValue) : "", - positiveIntValue: isSet(object.positiveIntValue) ? Number(object.positiveIntValue) : 0, - negativeIntValue: isSet(object.negativeIntValue) ? Number(object.negativeIntValue) : 0, - doubleValue: isSet(object.doubleValue) ? Number(object.doubleValue) : 0, - stringValue: isSet(object.stringValue) ? bytesFromBase64(object.stringValue) : new Uint8Array(0), - aggregateValue: isSet(object.aggregateValue) ? String(object.aggregateValue) : "", - }; - }, - - toJSON(message: UninterpretedOption): unknown { - const obj: any = {}; - if (message.name?.length) { - obj.name = message.name.map((e) => UninterpretedOption_NamePart.toJSON(e)); - } - if (message.identifierValue !== "") { - obj.identifierValue = message.identifierValue; - } - if (message.positiveIntValue !== 0) { - obj.positiveIntValue = Math.round(message.positiveIntValue); - } - if (message.negativeIntValue !== 0) { - obj.negativeIntValue = Math.round(message.negativeIntValue); - } - if (message.doubleValue !== 0) { - obj.doubleValue = message.doubleValue; - } - if (message.stringValue.length !== 0) { - obj.stringValue = base64FromBytes(message.stringValue); - } - if (message.aggregateValue !== "") { - obj.aggregateValue = message.aggregateValue; - } - return obj; - }, - - create, I>>(base?: I): UninterpretedOption { - return UninterpretedOption.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): UninterpretedOption { - const message = createBaseUninterpretedOption(); - message.name = object.name?.map((e) => UninterpretedOption_NamePart.fromPartial(e)) || []; - message.identifierValue = object.identifierValue ?? ""; - message.positiveIntValue = object.positiveIntValue ?? 0; - message.negativeIntValue = object.negativeIntValue ?? 0; - message.doubleValue = object.doubleValue ?? 0; - message.stringValue = object.stringValue ?? new Uint8Array(0); - message.aggregateValue = object.aggregateValue ?? ""; - return message; - }, -}; - -function createBaseUninterpretedOption_NamePart(): UninterpretedOption_NamePart { - return { namePart: "", isExtension: false }; -} - -export const UninterpretedOption_NamePart = { - encode(message: UninterpretedOption_NamePart, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.namePart !== "") { - writer.uint32(10).string(message.namePart); - } - if (message.isExtension === true) { - writer.uint32(16).bool(message.isExtension); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): UninterpretedOption_NamePart { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseUninterpretedOption_NamePart(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.namePart = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.isExtension = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): UninterpretedOption_NamePart { - return { - namePart: isSet(object.namePart) ? String(object.namePart) : "", - isExtension: isSet(object.isExtension) ? 
Boolean(object.isExtension) : false, - }; - }, - - toJSON(message: UninterpretedOption_NamePart): unknown { - const obj: any = {}; - if (message.namePart !== "") { - obj.namePart = message.namePart; - } - if (message.isExtension === true) { - obj.isExtension = message.isExtension; - } - return obj; - }, - - create, I>>(base?: I): UninterpretedOption_NamePart { - return UninterpretedOption_NamePart.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): UninterpretedOption_NamePart { - const message = createBaseUninterpretedOption_NamePart(); - message.namePart = object.namePart ?? ""; - message.isExtension = object.isExtension ?? false; - return message; - }, -}; - -function createBaseSourceCodeInfo(): SourceCodeInfo { - return { location: [] }; -} - -export const SourceCodeInfo = { - encode(message: SourceCodeInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.location) { - SourceCodeInfo_Location.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SourceCodeInfo { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSourceCodeInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.location.push(SourceCodeInfo_Location.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SourceCodeInfo { - return { - location: Array.isArray(object?.location) - ? object.location.map((e: any) => SourceCodeInfo_Location.fromJSON(e)) - : [], - }; - }, - - toJSON(message: SourceCodeInfo): unknown { - const obj: any = {}; - if (message.location?.length) { - obj.location = message.location.map((e) => SourceCodeInfo_Location.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): SourceCodeInfo { - return SourceCodeInfo.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SourceCodeInfo { - const message = createBaseSourceCodeInfo(); - message.location = object.location?.map((e) => SourceCodeInfo_Location.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSourceCodeInfo_Location(): SourceCodeInfo_Location { - return { path: [], span: [], leadingComments: "", trailingComments: "", leadingDetachedComments: [] }; -} - -export const SourceCodeInfo_Location = { - encode(message: SourceCodeInfo_Location, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - writer.uint32(10).fork(); - for (const v of message.path) { - writer.int32(v); - } - writer.ldelim(); - writer.uint32(18).fork(); - for (const v of message.span) { - writer.int32(v); - } - writer.ldelim(); - if (message.leadingComments !== "") { - writer.uint32(26).string(message.leadingComments); - } - if (message.trailingComments !== "") { - writer.uint32(34).string(message.trailingComments); - } - for (const v of message.leadingDetachedComments) { - writer.uint32(50).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SourceCodeInfo_Location { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSourceCodeInfo_Location(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag === 8) { - message.path.push(reader.int32()); - - continue; - } - - if (tag === 10) { - const end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) { - message.path.push(reader.int32()); - } - - continue; - } - - break; - case 2: - if (tag === 16) { - message.span.push(reader.int32()); - - continue; - } - - if (tag === 18) { - const end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) { - message.span.push(reader.int32()); - } - - continue; - } - - break; - case 3: - if (tag !== 26) { - break; - } - - message.leadingComments = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.trailingComments = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.leadingDetachedComments.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SourceCodeInfo_Location { - return { - path: Array.isArray(object?.path) ? object.path.map((e: any) => Number(e)) : [], - span: Array.isArray(object?.span) ? object.span.map((e: any) => Number(e)) : [], - leadingComments: isSet(object.leadingComments) ? String(object.leadingComments) : "", - trailingComments: isSet(object.trailingComments) ? String(object.trailingComments) : "", - leadingDetachedComments: Array.isArray(object?.leadingDetachedComments) - ? object.leadingDetachedComments.map((e: any) => String(e)) - : [], - }; - }, - - toJSON(message: SourceCodeInfo_Location): unknown { - const obj: any = {}; - if (message.path?.length) { - obj.path = message.path.map((e) => Math.round(e)); - } - if (message.span?.length) { - obj.span = message.span.map((e) => Math.round(e)); - } - if (message.leadingComments !== "") { - obj.leadingComments = message.leadingComments; - } - if (message.trailingComments !== "") { - obj.trailingComments = message.trailingComments; - } - if (message.leadingDetachedComments?.length) { - obj.leadingDetachedComments = message.leadingDetachedComments; - } - return obj; - }, - - create, I>>(base?: I): SourceCodeInfo_Location { - return SourceCodeInfo_Location.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SourceCodeInfo_Location { - const message = createBaseSourceCodeInfo_Location(); - message.path = object.path?.map((e) => e) || []; - message.span = object.span?.map((e) => e) || []; - message.leadingComments = object.leadingComments ?? ""; - message.trailingComments = object.trailingComments ?? ""; - message.leadingDetachedComments = object.leadingDetachedComments?.map((e) => e) || []; - return message; - }, -}; - -function createBaseGeneratedCodeInfo(): GeneratedCodeInfo { - return { annotation: [] }; -} - -export const GeneratedCodeInfo = { - encode(message: GeneratedCodeInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.annotation) { - GeneratedCodeInfo_Annotation.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GeneratedCodeInfo { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
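The SourceCodeInfo_Location codec above writes the repeated path and span fields packed (fork()/ldelim() around a run of int32 varints), and its decoder accepts both the packed form (tags 10/18) and the one-element-per-tag form (tags 8/16), as proto3 requires for repeated scalars. A small sketch of the packed encoding, assuming the same protobufjs/minimal writer the generated code imports; the input values and resulting bytes are illustrative.

import _m0 from "protobufjs/minimal";

// Packed repeated int32 field 1: a single length-delimited tag (10), then the
// payload length, then the varints back to back -- what fork()/ldelim() emit above.
const writer = _m0.Writer.create();
writer.uint32(10).fork();
for (const v of [4, 0, 2, 1]) {
  writer.int32(v);
}
writer.ldelim();
console.log(Array.from(writer.finish())); // [10, 4, 4, 0, 2, 1]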
reader.len : reader.pos + length; - const message = createBaseGeneratedCodeInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.annotation.push(GeneratedCodeInfo_Annotation.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GeneratedCodeInfo { - return { - annotation: Array.isArray(object?.annotation) - ? object.annotation.map((e: any) => GeneratedCodeInfo_Annotation.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GeneratedCodeInfo): unknown { - const obj: any = {}; - if (message.annotation?.length) { - obj.annotation = message.annotation.map((e) => GeneratedCodeInfo_Annotation.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): GeneratedCodeInfo { - return GeneratedCodeInfo.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): GeneratedCodeInfo { - const message = createBaseGeneratedCodeInfo(); - message.annotation = object.annotation?.map((e) => GeneratedCodeInfo_Annotation.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseGeneratedCodeInfo_Annotation(): GeneratedCodeInfo_Annotation { - return { path: [], sourceFile: "", begin: 0, end: 0, semantic: 0 }; -} - -export const GeneratedCodeInfo_Annotation = { - encode(message: GeneratedCodeInfo_Annotation, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - writer.uint32(10).fork(); - for (const v of message.path) { - writer.int32(v); - } - writer.ldelim(); - if (message.sourceFile !== "") { - writer.uint32(18).string(message.sourceFile); - } - if (message.begin !== 0) { - writer.uint32(24).int32(message.begin); - } - if (message.end !== 0) { - writer.uint32(32).int32(message.end); - } - if (message.semantic !== 0) { - writer.uint32(40).int32(message.semantic); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GeneratedCodeInfo_Annotation { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGeneratedCodeInfo_Annotation(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag === 8) { - message.path.push(reader.int32()); - - continue; - } - - if (tag === 10) { - const end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) { - message.path.push(reader.int32()); - } - - continue; - } - - break; - case 2: - if (tag !== 18) { - break; - } - - message.sourceFile = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.begin = reader.int32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.end = reader.int32(); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.semantic = reader.int32() as any; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GeneratedCodeInfo_Annotation { - return { - path: Array.isArray(object?.path) ? object.path.map((e: any) => Number(e)) : [], - sourceFile: isSet(object.sourceFile) ? String(object.sourceFile) : "", - begin: isSet(object.begin) ? Number(object.begin) : 0, - end: isSet(object.end) ? Number(object.end) : 0, - semantic: isSet(object.semantic) ? 
generatedCodeInfo_Annotation_SemanticFromJSON(object.semantic) : 0, - }; - }, - - toJSON(message: GeneratedCodeInfo_Annotation): unknown { - const obj: any = {}; - if (message.path?.length) { - obj.path = message.path.map((e) => Math.round(e)); - } - if (message.sourceFile !== "") { - obj.sourceFile = message.sourceFile; - } - if (message.begin !== 0) { - obj.begin = Math.round(message.begin); - } - if (message.end !== 0) { - obj.end = Math.round(message.end); - } - if (message.semantic !== 0) { - obj.semantic = generatedCodeInfo_Annotation_SemanticToJSON(message.semantic); - } - return obj; - }, - - create, I>>(base?: I): GeneratedCodeInfo_Annotation { - return GeneratedCodeInfo_Annotation.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): GeneratedCodeInfo_Annotation { - const message = createBaseGeneratedCodeInfo_Annotation(); - message.path = object.path?.map((e) => e) || []; - message.sourceFile = object.sourceFile ?? ""; - message.begin = object.begin ?? 0; - message.end = object.end ?? 0; - message.semantic = object.semantic ?? 0; - return message; - }, -}; - -declare const self: any | undefined; -declare const window: any | undefined; -declare const global: any | undefined; -const tsProtoGlobalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (tsProtoGlobalThis.Buffer) { - return Uint8Array.from(tsProtoGlobalThis.Buffer.from(b64, "base64")); - } else { - const bin = tsProtoGlobalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (tsProtoGlobalThis.Buffer) { - return tsProtoGlobalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return tsProtoGlobalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
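The bytesFromBase64 and base64FromBytes helpers above prefer Buffer when it exists and fall back to atob/btoa otherwise. A minimal round trip on the Buffer path, assuming a Node-like runtime (the browser path would swap in atob/btoa); the byte values are illustrative.

// Round trip of raw bytes through base64, mirroring the Buffer branch above.
const original = new Uint8Array([0xde, 0xad, 0xbe, 0xef]);
const b64 = Buffer.from(original).toString("base64");        // "3q2+7w=="
const decoded = Uint8Array.from(Buffer.from(b64, "base64"));
console.log(b64, decoded); // "3q2+7w==" Uint8Array(4) [ 222, 173, 190, 239 ]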
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function longToNumber(long: Long): number { - if (long.gt(Number.MAX_SAFE_INTEGER)) { - throw new tsProtoGlobalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); - } - return long.toNumber(); -} - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ui/grpc_generated/google/protobuf/timestamp.ts b/ui/grpc_generated/google/protobuf/timestamp.ts deleted file mode 100644 index 560af8a4e0..0000000000 --- a/ui/grpc_generated/google/protobuf/timestamp.ts +++ /dev/null @@ -1,232 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; - -export const protobufPackage = "google.protobuf"; - -/** - * A Timestamp represents a point in time independent of any time zone or local - * calendar, encoded as a count of seconds and fractions of seconds at - * nanosecond resolution. The count is relative to an epoch at UTC midnight on - * January 1, 1970, in the proleptic Gregorian calendar which extends the - * Gregorian calendar backwards to year one. - * - * All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap - * second table is needed for interpretation, using a [24-hour linear - * smear](https://developers.google.com/time/smear). - * - * The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By - * restricting to that range, we ensure that we can convert to and from [RFC - * 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. - * - * # Examples - * - * Example 1: Compute Timestamp from POSIX `time()`. - * - * Timestamp timestamp; - * timestamp.set_seconds(time(NULL)); - * timestamp.set_nanos(0); - * - * Example 2: Compute Timestamp from POSIX `gettimeofday()`. - * - * struct timeval tv; - * gettimeofday(&tv, NULL); - * - * Timestamp timestamp; - * timestamp.set_seconds(tv.tv_sec); - * timestamp.set_nanos(tv.tv_usec * 1000); - * - * Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. - * - * FILETIME ft; - * GetSystemTimeAsFileTime(&ft); - * UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - * - * // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z - * // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. - * Timestamp timestamp; - * timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); - * timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); - * - * Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. - * - * long millis = System.currentTimeMillis(); - * - * Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) - * .setNanos((int) ((millis % 1000) * 1000000)).build(); - * - * Example 5: Compute Timestamp from Java `Instant.now()`. - * - * Instant now = Instant.now(); - * - * Timestamp timestamp = - * Timestamp.newBuilder().setSeconds(now.getEpochSecond()) - * .setNanos(now.getNano()).build(); - * - * Example 6: Compute Timestamp from current time in Python. - * - * timestamp = Timestamp() - * timestamp.GetCurrentTime() - * - * # JSON Mapping - * - * In JSON format, the Timestamp type is encoded as a string in the - * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the - * format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" - * where {year} is always expressed using four digits while {month}, {day}, - * {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional - * seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), - * are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone - * is required. A proto3 JSON serializer should always use UTC (as indicated by - * "Z") when printing the Timestamp type and a proto3 JSON parser should be - * able to accept both UTC and other timezones (as indicated by an offset). - * - * For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past - * 01:30 UTC on January 15, 2017. - * - * In JavaScript, one can convert a Date object to this format using the - * standard - * [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) - * method. In Python, a standard `datetime.datetime` object can be converted - * to this format using - * [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with - * the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use - * the Joda Time's [`ISODateTimeFormat.dateTime()`]( - * http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D - * ) to obtain a formatter capable of generating timestamps in this format. - */ -export interface Timestamp { - /** - * Represents seconds of UTC time since Unix epoch - * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - * 9999-12-31T23:59:59Z inclusive. - */ - seconds: number; - /** - * Non-negative fractions of a second at nanosecond resolution. Negative - * second values with fractions must still have non-negative nanos values - * that count forward in time. Must be from 0 to 999,999,999 - * inclusive. - */ - nanos: number; -} - -function createBaseTimestamp(): Timestamp { - return { seconds: 0, nanos: 0 }; -} - -export const Timestamp = { - encode(message: Timestamp, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.seconds !== 0) { - writer.uint32(8).int64(message.seconds); - } - if (message.nanos !== 0) { - writer.uint32(16).int32(message.nanos); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Timestamp { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTimestamp(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.seconds = longToNumber(reader.int64() as Long); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.nanos = reader.int32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Timestamp { - return { - seconds: isSet(object.seconds) ? Number(object.seconds) : 0, - nanos: isSet(object.nanos) ? Number(object.nanos) : 0, - }; - }, - - toJSON(message: Timestamp): unknown { - const obj: any = {}; - if (message.seconds !== 0) { - obj.seconds = Math.round(message.seconds); - } - if (message.nanos !== 0) { - obj.nanos = Math.round(message.nanos); - } - return obj; - }, - - create, I>>(base?: I): Timestamp { - return Timestamp.fromPartial(base ?? 
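The Timestamp message above is a plain seconds/nanos pair whose proto3 JSON form is an RFC 3339 string in UTC, as its comment describes. A small sketch of that conversion using illustrative names rather than the generated codec:

// Illustrative only: splits a JS Date into the seconds/nanos pair described above
// and renders the proto3 JSON form (RFC 3339, UTC "Z" suffix).
interface TimestampLike {
  seconds: number; // whole seconds since 1970-01-01T00:00:00Z
  nanos: number;   // 0..999,999,999, always counting forward in time
}

function dateToTimestamp(d: Date): TimestampLike {
  const millis = d.getTime();
  const seconds = Math.floor(millis / 1000);
  return { seconds, nanos: (millis - seconds * 1000) * 1_000_000 };
}

function timestampToJson(t: TimestampLike): string {
  return new Date(t.seconds * 1000 + t.nanos / 1_000_000).toISOString();
}

console.log(timestampToJson(dateToTimestamp(new Date("2017-01-15T01:30:15.010Z"))));
// "2017-01-15T01:30:15.010Z"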
({} as any)); - }, - fromPartial, I>>(object: I): Timestamp { - const message = createBaseTimestamp(); - message.seconds = object.seconds ?? 0; - message.nanos = object.nanos ?? 0; - return message; - }, -}; - -declare const self: any | undefined; -declare const window: any | undefined; -declare const global: any | undefined; -const tsProtoGlobalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function longToNumber(long: Long): number { - if (long.gt(Number.MAX_SAFE_INTEGER)) { - throw new tsProtoGlobalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); - } - return long.toNumber(); -} - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ui/grpc_generated/peers.ts b/ui/grpc_generated/peers.ts deleted file mode 100644 index 7cdd2ca50b..0000000000 --- a/ui/grpc_generated/peers.ts +++ /dev/null @@ -1,1933 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; - -export const protobufPackage = "peerdb_peers"; - -export enum DBType { - BIGQUERY = 0, - SNOWFLAKE = 1, - MONGO = 2, - POSTGRES = 3, - EVENTHUB = 4, - S3 = 5, - SQLSERVER = 6, - EVENTHUB_GROUP = 7, - UNRECOGNIZED = -1, -} - -export function dBTypeFromJSON(object: any): DBType { - switch (object) { - case 0: - case "BIGQUERY": - return DBType.BIGQUERY; - case 1: - case "SNOWFLAKE": - return DBType.SNOWFLAKE; - case 2: - case "MONGO": - return DBType.MONGO; - case 3: - case "POSTGRES": - return DBType.POSTGRES; - case 4: - case "EVENTHUB": - return DBType.EVENTHUB; - case 5: - case "S3": - return DBType.S3; - case 6: - case "SQLSERVER": - return DBType.SQLSERVER; - case 7: - case "EVENTHUB_GROUP": - return DBType.EVENTHUB_GROUP; - case -1: - case "UNRECOGNIZED": - default: - return DBType.UNRECOGNIZED; - } -} - -export function dBTypeToJSON(object: DBType): string { - switch (object) { - case DBType.BIGQUERY: - return "BIGQUERY"; - case DBType.SNOWFLAKE: - return "SNOWFLAKE"; - case DBType.MONGO: - return "MONGO"; - case DBType.POSTGRES: - return "POSTGRES"; - case DBType.EVENTHUB: - return "EVENTHUB"; - case DBType.S3: - return "S3"; - case DBType.SQLSERVER: - return "SQLSERVER"; - case DBType.EVENTHUB_GROUP: - return "EVENTHUB_GROUP"; - case DBType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface SSHConfig { - host: string; - port: number; - user: string; - password: string; - privateKey: string; -} - -export interface SnowflakeConfig { - accountId: string; - username: string; - privateKey: string; - database: string; - warehouse: string; - role: string; - queryTimeout: number; - s3Integration: string; - password?: - | string - | undefined; - /** defaults to _PEERDB_INTERNAL */ - metadataSchema?: string | undefined; -} - -export 
interface BigqueryConfig { - authType: string; - projectId: string; - privateKeyId: string; - privateKey: string; - clientEmail: string; - clientId: string; - authUri: string; - tokenUri: string; - authProviderX509CertUrl: string; - clientX509CertUrl: string; - datasetId: string; -} - -export interface MongoConfig { - username: string; - password: string; - clusterurl: string; - clusterport: number; - database: string; -} - -export interface PostgresConfig { - host: string; - port: number; - user: string; - password: string; - database: string; - /** this is used only in query replication mode right now. */ - transactionSnapshot: string; - /** defaults to _peerdb_internal */ - metadataSchema?: string | undefined; - sshConfig?: SSHConfig | undefined; -} - -export interface EventHubConfig { - namespace: string; - resourceGroup: string; - location: string; - metadataDb: - | PostgresConfig - | undefined; - /** if this is empty PeerDB uses `AZURE_SUBSCRIPTION_ID` environment variable. */ - subscriptionId: string; - /** defaults to 3 */ - partitionCount: number; - /** defaults to 7 */ - messageRetentionInDays: number; -} - -export interface EventHubGroupConfig { - /** event hub peer name to event hub config */ - eventhubs: { [key: string]: EventHubConfig }; - metadataDb: PostgresConfig | undefined; - unnestColumns: string[]; -} - -export interface EventHubGroupConfig_EventhubsEntry { - key: string; - value: EventHubConfig | undefined; -} - -export interface S3Config { - url: string; - accessKeyId?: string | undefined; - secretAccessKey?: string | undefined; - roleArn?: string | undefined; - region?: string | undefined; - endpoint?: string | undefined; - metadataDb: PostgresConfig | undefined; -} - -export interface SqlServerConfig { - server: string; - port: number; - user: string; - password: string; - database: string; -} - -export interface Peer { - name: string; - type: DBType; - snowflakeConfig?: SnowflakeConfig | undefined; - bigqueryConfig?: BigqueryConfig | undefined; - mongoConfig?: MongoConfig | undefined; - postgresConfig?: PostgresConfig | undefined; - eventhubConfig?: EventHubConfig | undefined; - s3Config?: S3Config | undefined; - sqlserverConfig?: SqlServerConfig | undefined; - eventhubGroupConfig?: EventHubGroupConfig | undefined; -} - -function createBaseSSHConfig(): SSHConfig { - return { host: "", port: 0, user: "", password: "", privateKey: "" }; -} - -export const SSHConfig = { - encode(message: SSHConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.host !== "") { - writer.uint32(10).string(message.host); - } - if (message.port !== 0) { - writer.uint32(16).uint32(message.port); - } - if (message.user !== "") { - writer.uint32(26).string(message.user); - } - if (message.password !== "") { - writer.uint32(34).string(message.password); - } - if (message.privateKey !== "") { - writer.uint32(42).string(message.privateKey); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SSHConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSSHConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.host = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.port = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.user = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.password = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.privateKey = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SSHConfig { - return { - host: isSet(object.host) ? String(object.host) : "", - port: isSet(object.port) ? Number(object.port) : 0, - user: isSet(object.user) ? String(object.user) : "", - password: isSet(object.password) ? String(object.password) : "", - privateKey: isSet(object.privateKey) ? String(object.privateKey) : "", - }; - }, - - toJSON(message: SSHConfig): unknown { - const obj: any = {}; - if (message.host !== "") { - obj.host = message.host; - } - if (message.port !== 0) { - obj.port = Math.round(message.port); - } - if (message.user !== "") { - obj.user = message.user; - } - if (message.password !== "") { - obj.password = message.password; - } - if (message.privateKey !== "") { - obj.privateKey = message.privateKey; - } - return obj; - }, - - create, I>>(base?: I): SSHConfig { - return SSHConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SSHConfig { - const message = createBaseSSHConfig(); - message.host = object.host ?? ""; - message.port = object.port ?? 0; - message.user = object.user ?? ""; - message.password = object.password ?? ""; - message.privateKey = object.privateKey ?? ""; - return message; - }, -}; - -function createBaseSnowflakeConfig(): SnowflakeConfig { - return { - accountId: "", - username: "", - privateKey: "", - database: "", - warehouse: "", - role: "", - queryTimeout: 0, - s3Integration: "", - password: undefined, - metadataSchema: undefined, - }; -} - -export const SnowflakeConfig = { - encode(message: SnowflakeConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.accountId !== "") { - writer.uint32(10).string(message.accountId); - } - if (message.username !== "") { - writer.uint32(18).string(message.username); - } - if (message.privateKey !== "") { - writer.uint32(26).string(message.privateKey); - } - if (message.database !== "") { - writer.uint32(34).string(message.database); - } - if (message.warehouse !== "") { - writer.uint32(50).string(message.warehouse); - } - if (message.role !== "") { - writer.uint32(58).string(message.role); - } - if (message.queryTimeout !== 0) { - writer.uint32(64).uint64(message.queryTimeout); - } - if (message.s3Integration !== "") { - writer.uint32(74).string(message.s3Integration); - } - if (message.password !== undefined) { - writer.uint32(82).string(message.password); - } - if (message.metadataSchema !== undefined) { - writer.uint32(90).string(message.metadataSchema); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SnowflakeConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSnowflakeConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accountId = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.username = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.privateKey = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.database = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.warehouse = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.role = reader.string(); - continue; - case 8: - if (tag !== 64) { - break; - } - - message.queryTimeout = longToNumber(reader.uint64() as Long); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.s3Integration = reader.string(); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.password = reader.string(); - continue; - case 11: - if (tag !== 90) { - break; - } - - message.metadataSchema = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SnowflakeConfig { - return { - accountId: isSet(object.accountId) ? String(object.accountId) : "", - username: isSet(object.username) ? String(object.username) : "", - privateKey: isSet(object.privateKey) ? String(object.privateKey) : "", - database: isSet(object.database) ? String(object.database) : "", - warehouse: isSet(object.warehouse) ? String(object.warehouse) : "", - role: isSet(object.role) ? String(object.role) : "", - queryTimeout: isSet(object.queryTimeout) ? Number(object.queryTimeout) : 0, - s3Integration: isSet(object.s3Integration) ? String(object.s3Integration) : "", - password: isSet(object.password) ? String(object.password) : undefined, - metadataSchema: isSet(object.metadataSchema) ? String(object.metadataSchema) : undefined, - }; - }, - - toJSON(message: SnowflakeConfig): unknown { - const obj: any = {}; - if (message.accountId !== "") { - obj.accountId = message.accountId; - } - if (message.username !== "") { - obj.username = message.username; - } - if (message.privateKey !== "") { - obj.privateKey = message.privateKey; - } - if (message.database !== "") { - obj.database = message.database; - } - if (message.warehouse !== "") { - obj.warehouse = message.warehouse; - } - if (message.role !== "") { - obj.role = message.role; - } - if (message.queryTimeout !== 0) { - obj.queryTimeout = Math.round(message.queryTimeout); - } - if (message.s3Integration !== "") { - obj.s3Integration = message.s3Integration; - } - if (message.password !== undefined) { - obj.password = message.password; - } - if (message.metadataSchema !== undefined) { - obj.metadataSchema = message.metadataSchema; - } - return obj; - }, - - create, I>>(base?: I): SnowflakeConfig { - return SnowflakeConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SnowflakeConfig { - const message = createBaseSnowflakeConfig(); - message.accountId = object.accountId ?? ""; - message.username = object.username ?? ""; - message.privateKey = object.privateKey ?? ""; - message.database = object.database ?? ""; - message.warehouse = object.warehouse ?? ""; - message.role = object.role ?? ""; - message.queryTimeout = object.queryTimeout ?? 0; - message.s3Integration = object.s3Integration ?? ""; - message.password = object.password ?? 
undefined; - message.metadataSchema = object.metadataSchema ?? undefined; - return message; - }, -}; - -function createBaseBigqueryConfig(): BigqueryConfig { - return { - authType: "", - projectId: "", - privateKeyId: "", - privateKey: "", - clientEmail: "", - clientId: "", - authUri: "", - tokenUri: "", - authProviderX509CertUrl: "", - clientX509CertUrl: "", - datasetId: "", - }; -} - -export const BigqueryConfig = { - encode(message: BigqueryConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.authType !== "") { - writer.uint32(10).string(message.authType); - } - if (message.projectId !== "") { - writer.uint32(18).string(message.projectId); - } - if (message.privateKeyId !== "") { - writer.uint32(26).string(message.privateKeyId); - } - if (message.privateKey !== "") { - writer.uint32(34).string(message.privateKey); - } - if (message.clientEmail !== "") { - writer.uint32(42).string(message.clientEmail); - } - if (message.clientId !== "") { - writer.uint32(50).string(message.clientId); - } - if (message.authUri !== "") { - writer.uint32(58).string(message.authUri); - } - if (message.tokenUri !== "") { - writer.uint32(66).string(message.tokenUri); - } - if (message.authProviderX509CertUrl !== "") { - writer.uint32(74).string(message.authProviderX509CertUrl); - } - if (message.clientX509CertUrl !== "") { - writer.uint32(82).string(message.clientX509CertUrl); - } - if (message.datasetId !== "") { - writer.uint32(90).string(message.datasetId); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BigqueryConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBigqueryConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.authType = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.projectId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.privateKeyId = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.privateKey = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.clientEmail = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.clientId = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.authUri = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.tokenUri = reader.string(); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.authProviderX509CertUrl = reader.string(); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.clientX509CertUrl = reader.string(); - continue; - case 11: - if (tag !== 90) { - break; - } - - message.datasetId = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BigqueryConfig { - return { - authType: isSet(object.authType) ? String(object.authType) : "", - projectId: isSet(object.projectId) ? String(object.projectId) : "", - privateKeyId: isSet(object.privateKeyId) ? String(object.privateKeyId) : "", - privateKey: isSet(object.privateKey) ? String(object.privateKey) : "", - clientEmail: isSet(object.clientEmail) ? String(object.clientEmail) : "", - clientId: isSet(object.clientId) ? 
String(object.clientId) : "", - authUri: isSet(object.authUri) ? String(object.authUri) : "", - tokenUri: isSet(object.tokenUri) ? String(object.tokenUri) : "", - authProviderX509CertUrl: isSet(object.authProviderX509CertUrl) ? String(object.authProviderX509CertUrl) : "", - clientX509CertUrl: isSet(object.clientX509CertUrl) ? String(object.clientX509CertUrl) : "", - datasetId: isSet(object.datasetId) ? String(object.datasetId) : "", - }; - }, - - toJSON(message: BigqueryConfig): unknown { - const obj: any = {}; - if (message.authType !== "") { - obj.authType = message.authType; - } - if (message.projectId !== "") { - obj.projectId = message.projectId; - } - if (message.privateKeyId !== "") { - obj.privateKeyId = message.privateKeyId; - } - if (message.privateKey !== "") { - obj.privateKey = message.privateKey; - } - if (message.clientEmail !== "") { - obj.clientEmail = message.clientEmail; - } - if (message.clientId !== "") { - obj.clientId = message.clientId; - } - if (message.authUri !== "") { - obj.authUri = message.authUri; - } - if (message.tokenUri !== "") { - obj.tokenUri = message.tokenUri; - } - if (message.authProviderX509CertUrl !== "") { - obj.authProviderX509CertUrl = message.authProviderX509CertUrl; - } - if (message.clientX509CertUrl !== "") { - obj.clientX509CertUrl = message.clientX509CertUrl; - } - if (message.datasetId !== "") { - obj.datasetId = message.datasetId; - } - return obj; - }, - - create, I>>(base?: I): BigqueryConfig { - return BigqueryConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): BigqueryConfig { - const message = createBaseBigqueryConfig(); - message.authType = object.authType ?? ""; - message.projectId = object.projectId ?? ""; - message.privateKeyId = object.privateKeyId ?? ""; - message.privateKey = object.privateKey ?? ""; - message.clientEmail = object.clientEmail ?? ""; - message.clientId = object.clientId ?? ""; - message.authUri = object.authUri ?? ""; - message.tokenUri = object.tokenUri ?? ""; - message.authProviderX509CertUrl = object.authProviderX509CertUrl ?? ""; - message.clientX509CertUrl = object.clientX509CertUrl ?? ""; - message.datasetId = object.datasetId ?? ""; - return message; - }, -}; - -function createBaseMongoConfig(): MongoConfig { - return { username: "", password: "", clusterurl: "", clusterport: 0, database: "" }; -} - -export const MongoConfig = { - encode(message: MongoConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.username !== "") { - writer.uint32(10).string(message.username); - } - if (message.password !== "") { - writer.uint32(18).string(message.password); - } - if (message.clusterurl !== "") { - writer.uint32(26).string(message.clusterurl); - } - if (message.clusterport !== 0) { - writer.uint32(32).int32(message.clusterport); - } - if (message.database !== "") { - writer.uint32(42).string(message.database); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MongoConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMongoConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.username = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.password = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.clusterurl = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.clusterport = reader.int32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.database = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MongoConfig { - return { - username: isSet(object.username) ? String(object.username) : "", - password: isSet(object.password) ? String(object.password) : "", - clusterurl: isSet(object.clusterurl) ? String(object.clusterurl) : "", - clusterport: isSet(object.clusterport) ? Number(object.clusterport) : 0, - database: isSet(object.database) ? String(object.database) : "", - }; - }, - - toJSON(message: MongoConfig): unknown { - const obj: any = {}; - if (message.username !== "") { - obj.username = message.username; - } - if (message.password !== "") { - obj.password = message.password; - } - if (message.clusterurl !== "") { - obj.clusterurl = message.clusterurl; - } - if (message.clusterport !== 0) { - obj.clusterport = Math.round(message.clusterport); - } - if (message.database !== "") { - obj.database = message.database; - } - return obj; - }, - - create, I>>(base?: I): MongoConfig { - return MongoConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): MongoConfig { - const message = createBaseMongoConfig(); - message.username = object.username ?? ""; - message.password = object.password ?? ""; - message.clusterurl = object.clusterurl ?? ""; - message.clusterport = object.clusterport ?? 0; - message.database = object.database ?? ""; - return message; - }, -}; - -function createBasePostgresConfig(): PostgresConfig { - return { - host: "", - port: 0, - user: "", - password: "", - database: "", - transactionSnapshot: "", - metadataSchema: undefined, - sshConfig: undefined, - }; -} - -export const PostgresConfig = { - encode(message: PostgresConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.host !== "") { - writer.uint32(10).string(message.host); - } - if (message.port !== 0) { - writer.uint32(16).uint32(message.port); - } - if (message.user !== "") { - writer.uint32(26).string(message.user); - } - if (message.password !== "") { - writer.uint32(34).string(message.password); - } - if (message.database !== "") { - writer.uint32(42).string(message.database); - } - if (message.transactionSnapshot !== "") { - writer.uint32(50).string(message.transactionSnapshot); - } - if (message.metadataSchema !== undefined) { - writer.uint32(58).string(message.metadataSchema); - } - if (message.sshConfig !== undefined) { - SSHConfig.encode(message.sshConfig, writer.uint32(66).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PostgresConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBasePostgresConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.host = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.port = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.user = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.password = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.database = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.transactionSnapshot = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.metadataSchema = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.sshConfig = SSHConfig.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PostgresConfig { - return { - host: isSet(object.host) ? String(object.host) : "", - port: isSet(object.port) ? Number(object.port) : 0, - user: isSet(object.user) ? String(object.user) : "", - password: isSet(object.password) ? String(object.password) : "", - database: isSet(object.database) ? String(object.database) : "", - transactionSnapshot: isSet(object.transactionSnapshot) ? String(object.transactionSnapshot) : "", - metadataSchema: isSet(object.metadataSchema) ? String(object.metadataSchema) : undefined, - sshConfig: isSet(object.sshConfig) ? SSHConfig.fromJSON(object.sshConfig) : undefined, - }; - }, - - toJSON(message: PostgresConfig): unknown { - const obj: any = {}; - if (message.host !== "") { - obj.host = message.host; - } - if (message.port !== 0) { - obj.port = Math.round(message.port); - } - if (message.user !== "") { - obj.user = message.user; - } - if (message.password !== "") { - obj.password = message.password; - } - if (message.database !== "") { - obj.database = message.database; - } - if (message.transactionSnapshot !== "") { - obj.transactionSnapshot = message.transactionSnapshot; - } - if (message.metadataSchema !== undefined) { - obj.metadataSchema = message.metadataSchema; - } - if (message.sshConfig !== undefined) { - obj.sshConfig = SSHConfig.toJSON(message.sshConfig); - } - return obj; - }, - - create, I>>(base?: I): PostgresConfig { - return PostgresConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PostgresConfig { - const message = createBasePostgresConfig(); - message.host = object.host ?? ""; - message.port = object.port ?? 0; - message.user = object.user ?? ""; - message.password = object.password ?? ""; - message.database = object.database ?? ""; - message.transactionSnapshot = object.transactionSnapshot ?? ""; - message.metadataSchema = object.metadataSchema ?? undefined; - message.sshConfig = (object.sshConfig !== undefined && object.sshConfig !== null) - ? 
SSHConfig.fromPartial(object.sshConfig) - : undefined; - return message; - }, -}; - -function createBaseEventHubConfig(): EventHubConfig { - return { - namespace: "", - resourceGroup: "", - location: "", - metadataDb: undefined, - subscriptionId: "", - partitionCount: 0, - messageRetentionInDays: 0, - }; -} - -export const EventHubConfig = { - encode(message: EventHubConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.namespace !== "") { - writer.uint32(10).string(message.namespace); - } - if (message.resourceGroup !== "") { - writer.uint32(18).string(message.resourceGroup); - } - if (message.location !== "") { - writer.uint32(26).string(message.location); - } - if (message.metadataDb !== undefined) { - PostgresConfig.encode(message.metadataDb, writer.uint32(34).fork()).ldelim(); - } - if (message.subscriptionId !== "") { - writer.uint32(42).string(message.subscriptionId); - } - if (message.partitionCount !== 0) { - writer.uint32(48).uint32(message.partitionCount); - } - if (message.messageRetentionInDays !== 0) { - writer.uint32(56).uint32(message.messageRetentionInDays); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EventHubConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEventHubConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.namespace = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.resourceGroup = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.location = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.metadataDb = PostgresConfig.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.subscriptionId = reader.string(); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.partitionCount = reader.uint32(); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.messageRetentionInDays = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EventHubConfig { - return { - namespace: isSet(object.namespace) ? String(object.namespace) : "", - resourceGroup: isSet(object.resourceGroup) ? String(object.resourceGroup) : "", - location: isSet(object.location) ? String(object.location) : "", - metadataDb: isSet(object.metadataDb) ? PostgresConfig.fromJSON(object.metadataDb) : undefined, - subscriptionId: isSet(object.subscriptionId) ? String(object.subscriptionId) : "", - partitionCount: isSet(object.partitionCount) ? Number(object.partitionCount) : 0, - messageRetentionInDays: isSet(object.messageRetentionInDays) ? 
Number(object.messageRetentionInDays) : 0, - }; - }, - - toJSON(message: EventHubConfig): unknown { - const obj: any = {}; - if (message.namespace !== "") { - obj.namespace = message.namespace; - } - if (message.resourceGroup !== "") { - obj.resourceGroup = message.resourceGroup; - } - if (message.location !== "") { - obj.location = message.location; - } - if (message.metadataDb !== undefined) { - obj.metadataDb = PostgresConfig.toJSON(message.metadataDb); - } - if (message.subscriptionId !== "") { - obj.subscriptionId = message.subscriptionId; - } - if (message.partitionCount !== 0) { - obj.partitionCount = Math.round(message.partitionCount); - } - if (message.messageRetentionInDays !== 0) { - obj.messageRetentionInDays = Math.round(message.messageRetentionInDays); - } - return obj; - }, - - create, I>>(base?: I): EventHubConfig { - return EventHubConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EventHubConfig { - const message = createBaseEventHubConfig(); - message.namespace = object.namespace ?? ""; - message.resourceGroup = object.resourceGroup ?? ""; - message.location = object.location ?? ""; - message.metadataDb = (object.metadataDb !== undefined && object.metadataDb !== null) - ? PostgresConfig.fromPartial(object.metadataDb) - : undefined; - message.subscriptionId = object.subscriptionId ?? ""; - message.partitionCount = object.partitionCount ?? 0; - message.messageRetentionInDays = object.messageRetentionInDays ?? 0; - return message; - }, -}; - -function createBaseEventHubGroupConfig(): EventHubGroupConfig { - return { eventhubs: {}, metadataDb: undefined, unnestColumns: [] }; -} - -export const EventHubGroupConfig = { - encode(message: EventHubGroupConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - Object.entries(message.eventhubs).forEach(([key, value]) => { - EventHubGroupConfig_EventhubsEntry.encode({ key: key as any, value }, writer.uint32(10).fork()).ldelim(); - }); - if (message.metadataDb !== undefined) { - PostgresConfig.encode(message.metadataDb, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.unnestColumns) { - writer.uint32(26).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EventHubGroupConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEventHubGroupConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - const entry1 = EventHubGroupConfig_EventhubsEntry.decode(reader, reader.uint32()); - if (entry1.value !== undefined) { - message.eventhubs[entry1.key] = entry1.value; - } - continue; - case 2: - if (tag !== 18) { - break; - } - - message.metadataDb = PostgresConfig.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.unnestColumns.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EventHubGroupConfig { - return { - eventhubs: isObject(object.eventhubs) - ? Object.entries(object.eventhubs).reduce<{ [key: string]: EventHubConfig }>((acc, [key, value]) => { - acc[key] = EventHubConfig.fromJSON(value); - return acc; - }, {}) - : {}, - metadataDb: isSet(object.metadataDb) ? 
PostgresConfig.fromJSON(object.metadataDb) : undefined, - unnestColumns: Array.isArray(object?.unnestColumns) ? object.unnestColumns.map((e: any) => String(e)) : [], - }; - }, - - toJSON(message: EventHubGroupConfig): unknown { - const obj: any = {}; - if (message.eventhubs) { - const entries = Object.entries(message.eventhubs); - if (entries.length > 0) { - obj.eventhubs = {}; - entries.forEach(([k, v]) => { - obj.eventhubs[k] = EventHubConfig.toJSON(v); - }); - } - } - if (message.metadataDb !== undefined) { - obj.metadataDb = PostgresConfig.toJSON(message.metadataDb); - } - if (message.unnestColumns?.length) { - obj.unnestColumns = message.unnestColumns; - } - return obj; - }, - - create, I>>(base?: I): EventHubGroupConfig { - return EventHubGroupConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): EventHubGroupConfig { - const message = createBaseEventHubGroupConfig(); - message.eventhubs = Object.entries(object.eventhubs ?? {}).reduce<{ [key: string]: EventHubConfig }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = EventHubConfig.fromPartial(value); - } - return acc; - }, - {}, - ); - message.metadataDb = (object.metadataDb !== undefined && object.metadataDb !== null) - ? PostgresConfig.fromPartial(object.metadataDb) - : undefined; - message.unnestColumns = object.unnestColumns?.map((e) => e) || []; - return message; - }, -}; - -function createBaseEventHubGroupConfig_EventhubsEntry(): EventHubGroupConfig_EventhubsEntry { - return { key: "", value: undefined }; -} - -export const EventHubGroupConfig_EventhubsEntry = { - encode(message: EventHubGroupConfig_EventhubsEntry, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== undefined) { - EventHubConfig.encode(message.value, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): EventHubGroupConfig_EventhubsEntry { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEventHubGroupConfig_EventhubsEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = EventHubConfig.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): EventHubGroupConfig_EventhubsEntry { - return { - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? EventHubConfig.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: EventHubGroupConfig_EventhubsEntry): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== undefined) { - obj.value = EventHubConfig.toJSON(message.value); - } - return obj; - }, - - create, I>>( - base?: I, - ): EventHubGroupConfig_EventhubsEntry { - return EventHubGroupConfig_EventhubsEntry.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): EventHubGroupConfig_EventhubsEntry { - const message = createBaseEventHubGroupConfig_EventhubsEntry(); - message.key = object.key ?? ""; - message.value = (object.value !== undefined && object.value !== null) - ? 
EventHubConfig.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseS3Config(): S3Config { - return { - url: "", - accessKeyId: undefined, - secretAccessKey: undefined, - roleArn: undefined, - region: undefined, - endpoint: undefined, - metadataDb: undefined, - }; -} - -export const S3Config = { - encode(message: S3Config, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.url !== "") { - writer.uint32(10).string(message.url); - } - if (message.accessKeyId !== undefined) { - writer.uint32(18).string(message.accessKeyId); - } - if (message.secretAccessKey !== undefined) { - writer.uint32(26).string(message.secretAccessKey); - } - if (message.roleArn !== undefined) { - writer.uint32(34).string(message.roleArn); - } - if (message.region !== undefined) { - writer.uint32(42).string(message.region); - } - if (message.endpoint !== undefined) { - writer.uint32(50).string(message.endpoint); - } - if (message.metadataDb !== undefined) { - PostgresConfig.encode(message.metadataDb, writer.uint32(58).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): S3Config { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseS3Config(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.url = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.accessKeyId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.secretAccessKey = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.roleArn = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.region = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.endpoint = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.metadataDb = PostgresConfig.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): S3Config { - return { - url: isSet(object.url) ? String(object.url) : "", - accessKeyId: isSet(object.accessKeyId) ? String(object.accessKeyId) : undefined, - secretAccessKey: isSet(object.secretAccessKey) ? String(object.secretAccessKey) : undefined, - roleArn: isSet(object.roleArn) ? String(object.roleArn) : undefined, - region: isSet(object.region) ? String(object.region) : undefined, - endpoint: isSet(object.endpoint) ? String(object.endpoint) : undefined, - metadataDb: isSet(object.metadataDb) ? 
PostgresConfig.fromJSON(object.metadataDb) : undefined, - }; - }, - - toJSON(message: S3Config): unknown { - const obj: any = {}; - if (message.url !== "") { - obj.url = message.url; - } - if (message.accessKeyId !== undefined) { - obj.accessKeyId = message.accessKeyId; - } - if (message.secretAccessKey !== undefined) { - obj.secretAccessKey = message.secretAccessKey; - } - if (message.roleArn !== undefined) { - obj.roleArn = message.roleArn; - } - if (message.region !== undefined) { - obj.region = message.region; - } - if (message.endpoint !== undefined) { - obj.endpoint = message.endpoint; - } - if (message.metadataDb !== undefined) { - obj.metadataDb = PostgresConfig.toJSON(message.metadataDb); - } - return obj; - }, - - create, I>>(base?: I): S3Config { - return S3Config.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): S3Config { - const message = createBaseS3Config(); - message.url = object.url ?? ""; - message.accessKeyId = object.accessKeyId ?? undefined; - message.secretAccessKey = object.secretAccessKey ?? undefined; - message.roleArn = object.roleArn ?? undefined; - message.region = object.region ?? undefined; - message.endpoint = object.endpoint ?? undefined; - message.metadataDb = (object.metadataDb !== undefined && object.metadataDb !== null) - ? PostgresConfig.fromPartial(object.metadataDb) - : undefined; - return message; - }, -}; - -function createBaseSqlServerConfig(): SqlServerConfig { - return { server: "", port: 0, user: "", password: "", database: "" }; -} - -export const SqlServerConfig = { - encode(message: SqlServerConfig, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.server !== "") { - writer.uint32(10).string(message.server); - } - if (message.port !== 0) { - writer.uint32(16).uint32(message.port); - } - if (message.user !== "") { - writer.uint32(26).string(message.user); - } - if (message.password !== "") { - writer.uint32(34).string(message.password); - } - if (message.database !== "") { - writer.uint32(42).string(message.database); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SqlServerConfig { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSqlServerConfig(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.server = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.port = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.user = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.password = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.database = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SqlServerConfig { - return { - server: isSet(object.server) ? String(object.server) : "", - port: isSet(object.port) ? Number(object.port) : 0, - user: isSet(object.user) ? String(object.user) : "", - password: isSet(object.password) ? String(object.password) : "", - database: isSet(object.database) ? 
String(object.database) : "", - }; - }, - - toJSON(message: SqlServerConfig): unknown { - const obj: any = {}; - if (message.server !== "") { - obj.server = message.server; - } - if (message.port !== 0) { - obj.port = Math.round(message.port); - } - if (message.user !== "") { - obj.user = message.user; - } - if (message.password !== "") { - obj.password = message.password; - } - if (message.database !== "") { - obj.database = message.database; - } - return obj; - }, - - create, I>>(base?: I): SqlServerConfig { - return SqlServerConfig.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SqlServerConfig { - const message = createBaseSqlServerConfig(); - message.server = object.server ?? ""; - message.port = object.port ?? 0; - message.user = object.user ?? ""; - message.password = object.password ?? ""; - message.database = object.database ?? ""; - return message; - }, -}; - -function createBasePeer(): Peer { - return { - name: "", - type: 0, - snowflakeConfig: undefined, - bigqueryConfig: undefined, - mongoConfig: undefined, - postgresConfig: undefined, - eventhubConfig: undefined, - s3Config: undefined, - sqlserverConfig: undefined, - eventhubGroupConfig: undefined, - }; -} - -export const Peer = { - encode(message: Peer, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.type !== 0) { - writer.uint32(16).int32(message.type); - } - if (message.snowflakeConfig !== undefined) { - SnowflakeConfig.encode(message.snowflakeConfig, writer.uint32(26).fork()).ldelim(); - } - if (message.bigqueryConfig !== undefined) { - BigqueryConfig.encode(message.bigqueryConfig, writer.uint32(34).fork()).ldelim(); - } - if (message.mongoConfig !== undefined) { - MongoConfig.encode(message.mongoConfig, writer.uint32(42).fork()).ldelim(); - } - if (message.postgresConfig !== undefined) { - PostgresConfig.encode(message.postgresConfig, writer.uint32(50).fork()).ldelim(); - } - if (message.eventhubConfig !== undefined) { - EventHubConfig.encode(message.eventhubConfig, writer.uint32(58).fork()).ldelim(); - } - if (message.s3Config !== undefined) { - S3Config.encode(message.s3Config, writer.uint32(66).fork()).ldelim(); - } - if (message.sqlserverConfig !== undefined) { - SqlServerConfig.encode(message.sqlserverConfig, writer.uint32(74).fork()).ldelim(); - } - if (message.eventhubGroupConfig !== undefined) { - EventHubGroupConfig.encode(message.eventhubGroupConfig, writer.uint32(82).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Peer { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBasePeer(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.type = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.snowflakeConfig = SnowflakeConfig.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.bigqueryConfig = BigqueryConfig.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.mongoConfig = MongoConfig.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.postgresConfig = PostgresConfig.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.eventhubConfig = EventHubConfig.decode(reader, reader.uint32()); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.s3Config = S3Config.decode(reader, reader.uint32()); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.sqlserverConfig = SqlServerConfig.decode(reader, reader.uint32()); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.eventhubGroupConfig = EventHubGroupConfig.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Peer { - return { - name: isSet(object.name) ? String(object.name) : "", - type: isSet(object.type) ? dBTypeFromJSON(object.type) : 0, - snowflakeConfig: isSet(object.snowflakeConfig) ? SnowflakeConfig.fromJSON(object.snowflakeConfig) : undefined, - bigqueryConfig: isSet(object.bigqueryConfig) ? BigqueryConfig.fromJSON(object.bigqueryConfig) : undefined, - mongoConfig: isSet(object.mongoConfig) ? MongoConfig.fromJSON(object.mongoConfig) : undefined, - postgresConfig: isSet(object.postgresConfig) ? PostgresConfig.fromJSON(object.postgresConfig) : undefined, - eventhubConfig: isSet(object.eventhubConfig) ? EventHubConfig.fromJSON(object.eventhubConfig) : undefined, - s3Config: isSet(object.s3Config) ? S3Config.fromJSON(object.s3Config) : undefined, - sqlserverConfig: isSet(object.sqlserverConfig) ? SqlServerConfig.fromJSON(object.sqlserverConfig) : undefined, - eventhubGroupConfig: isSet(object.eventhubGroupConfig) - ? 
EventHubGroupConfig.fromJSON(object.eventhubGroupConfig) - : undefined, - }; - }, - - toJSON(message: Peer): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.type !== 0) { - obj.type = dBTypeToJSON(message.type); - } - if (message.snowflakeConfig !== undefined) { - obj.snowflakeConfig = SnowflakeConfig.toJSON(message.snowflakeConfig); - } - if (message.bigqueryConfig !== undefined) { - obj.bigqueryConfig = BigqueryConfig.toJSON(message.bigqueryConfig); - } - if (message.mongoConfig !== undefined) { - obj.mongoConfig = MongoConfig.toJSON(message.mongoConfig); - } - if (message.postgresConfig !== undefined) { - obj.postgresConfig = PostgresConfig.toJSON(message.postgresConfig); - } - if (message.eventhubConfig !== undefined) { - obj.eventhubConfig = EventHubConfig.toJSON(message.eventhubConfig); - } - if (message.s3Config !== undefined) { - obj.s3Config = S3Config.toJSON(message.s3Config); - } - if (message.sqlserverConfig !== undefined) { - obj.sqlserverConfig = SqlServerConfig.toJSON(message.sqlserverConfig); - } - if (message.eventhubGroupConfig !== undefined) { - obj.eventhubGroupConfig = EventHubGroupConfig.toJSON(message.eventhubGroupConfig); - } - return obj; - }, - - create, I>>(base?: I): Peer { - return Peer.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): Peer { - const message = createBasePeer(); - message.name = object.name ?? ""; - message.type = object.type ?? 0; - message.snowflakeConfig = (object.snowflakeConfig !== undefined && object.snowflakeConfig !== null) - ? SnowflakeConfig.fromPartial(object.snowflakeConfig) - : undefined; - message.bigqueryConfig = (object.bigqueryConfig !== undefined && object.bigqueryConfig !== null) - ? BigqueryConfig.fromPartial(object.bigqueryConfig) - : undefined; - message.mongoConfig = (object.mongoConfig !== undefined && object.mongoConfig !== null) - ? MongoConfig.fromPartial(object.mongoConfig) - : undefined; - message.postgresConfig = (object.postgresConfig !== undefined && object.postgresConfig !== null) - ? PostgresConfig.fromPartial(object.postgresConfig) - : undefined; - message.eventhubConfig = (object.eventhubConfig !== undefined && object.eventhubConfig !== null) - ? EventHubConfig.fromPartial(object.eventhubConfig) - : undefined; - message.s3Config = (object.s3Config !== undefined && object.s3Config !== null) - ? S3Config.fromPartial(object.s3Config) - : undefined; - message.sqlserverConfig = (object.sqlserverConfig !== undefined && object.sqlserverConfig !== null) - ? SqlServerConfig.fromPartial(object.sqlserverConfig) - : undefined; - message.eventhubGroupConfig = (object.eventhubGroupConfig !== undefined && object.eventhubGroupConfig !== null) - ? EventHubGroupConfig.fromPartial(object.eventhubGroupConfig) - : undefined; - return message; - }, -}; - -declare const self: any | undefined; -declare const window: any | undefined; -declare const global: any | undefined; -const tsProtoGlobalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends {} ? 
{ [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function longToNumber(long: Long): number { - if (long.gt(Number.MAX_SAFE_INTEGER)) { - throw new tsProtoGlobalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); - } - return long.toNumber(); -} - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ui/grpc_generated/route.ts b/ui/grpc_generated/route.ts deleted file mode 100644 index c59fa54c2a..0000000000 --- a/ui/grpc_generated/route.ts +++ /dev/null @@ -1,3368 +0,0 @@ -/* eslint-disable */ -import { - CallOptions, - ChannelCredentials, - Client, - ClientOptions, - ClientUnaryCall, - handleUnaryCall, - makeGenericClientConstructor, - Metadata, - ServiceError, - UntypedServiceImplementation, -} from "@grpc/grpc-js"; -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { FlowConnectionConfigs, QRepConfig } from "./flow"; -import { Timestamp } from "./google/protobuf/timestamp"; -import { Peer } from "./peers"; - -export const protobufPackage = "peerdb_route"; - -export enum ValidatePeerStatus { - CREATION_UNKNOWN = 0, - VALID = 1, - INVALID = 2, - UNRECOGNIZED = -1, -} - -export function validatePeerStatusFromJSON(object: any): ValidatePeerStatus { - switch (object) { - case 0: - case "CREATION_UNKNOWN": - return ValidatePeerStatus.CREATION_UNKNOWN; - case 1: - case "VALID": - return ValidatePeerStatus.VALID; - case 2: - case "INVALID": - return ValidatePeerStatus.INVALID; - case -1: - case "UNRECOGNIZED": - default: - return ValidatePeerStatus.UNRECOGNIZED; - } -} - -export function validatePeerStatusToJSON(object: ValidatePeerStatus): string { - switch (object) { - case ValidatePeerStatus.CREATION_UNKNOWN: - return "CREATION_UNKNOWN"; - case ValidatePeerStatus.VALID: - return "VALID"; - case ValidatePeerStatus.INVALID: - return "INVALID"; - case ValidatePeerStatus.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export enum CreatePeerStatus { - VALIDATION_UNKNOWN = 0, - CREATED = 1, - FAILED = 2, - UNRECOGNIZED = -1, -} - -export function createPeerStatusFromJSON(object: any): CreatePeerStatus { - switch (object) { - case 0: - case "VALIDATION_UNKNOWN": - return CreatePeerStatus.VALIDATION_UNKNOWN; - case 1: - case "CREATED": - return CreatePeerStatus.CREATED; - case 2: - case "FAILED": - return CreatePeerStatus.FAILED; - case -1: - case "UNRECOGNIZED": - default: - return CreatePeerStatus.UNRECOGNIZED; - } -} - -export function createPeerStatusToJSON(object: CreatePeerStatus): string { - switch (object) { - case CreatePeerStatus.VALIDATION_UNKNOWN: - return "VALIDATION_UNKNOWN"; - case CreatePeerStatus.CREATED: - return "CREATED"; - case CreatePeerStatus.FAILED: - return "FAILED"; - case CreatePeerStatus.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** in the future, consider moving DropFlow to this and reduce route surface */ -export enum FlowState { - STATE_UNKNOWN = 0, - STATE_RUNNING = 1, - STATE_PAUSED = 2, - UNRECOGNIZED = -1, -} - -export function flowStateFromJSON(object: any): FlowState { - switch (object) { - case 0: - case "STATE_UNKNOWN": - return FlowState.STATE_UNKNOWN; - case 1: - case "STATE_RUNNING": - return 
FlowState.STATE_RUNNING; - case 2: - case "STATE_PAUSED": - return FlowState.STATE_PAUSED; - case -1: - case "UNRECOGNIZED": - default: - return FlowState.UNRECOGNIZED; - } -} - -export function flowStateToJSON(object: FlowState): string { - switch (object) { - case FlowState.STATE_UNKNOWN: - return "STATE_UNKNOWN"; - case FlowState.STATE_RUNNING: - return "STATE_RUNNING"; - case FlowState.STATE_PAUSED: - return "STATE_PAUSED"; - case FlowState.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface CreateCDCFlowRequest { - connectionConfigs: FlowConnectionConfigs | undefined; - createCatalogEntry: boolean; -} - -export interface CreateCDCFlowResponse { - worflowId: string; -} - -export interface CreateQRepFlowRequest { - qrepConfig: QRepConfig | undefined; - createCatalogEntry: boolean; -} - -export interface CreateQRepFlowResponse { - worflowId: string; -} - -export interface ShutdownRequest { - workflowId: string; - flowJobName: string; - sourcePeer: Peer | undefined; - destinationPeer: Peer | undefined; - removeFlowEntry: boolean; -} - -export interface ShutdownResponse { - ok: boolean; - errorMessage: string; -} - -export interface ValidatePeerRequest { - peer: Peer | undefined; -} - -export interface CreatePeerRequest { - peer: Peer | undefined; -} - -export interface DropPeerRequest { - peerName: string; -} - -export interface DropPeerResponse { - ok: boolean; - errorMessage: string; -} - -export interface ValidatePeerResponse { - status: ValidatePeerStatus; - message: string; -} - -export interface CreatePeerResponse { - status: CreatePeerStatus; - message: string; -} - -export interface MirrorStatusRequest { - flowJobName: string; -} - -export interface PartitionStatus { - partitionId: string; - startTime: Date | undefined; - endTime: Date | undefined; - numRows: number; -} - -export interface QRepMirrorStatus { - config: - | QRepConfig - | undefined; - /** - * TODO make note to see if we are still in initial copy - * or if we are in the continuous streaming mode. 
- */ - partitions: PartitionStatus[]; -} - -export interface CDCSyncStatus { - startLsn: number; - endLsn: number; - numRows: number; - startTime: Date | undefined; - endTime: Date | undefined; -} - -export interface PeerSchemasResponse { - schemas: string[]; -} - -export interface SchemaTablesRequest { - peerName: string; - schemaName: string; -} - -export interface SchemaTablesResponse { - tables: string[]; -} - -export interface AllTablesResponse { - tables: string[]; -} - -export interface TableColumnsRequest { - peerName: string; - schemaName: string; - tableName: string; -} - -export interface TableColumnsResponse { - columns: string[]; -} - -export interface PostgresPeerActivityInfoRequest { - peerName: string; -} - -export interface SlotInfo { - slotName: string; - redoLSN: string; - restartLSN: string; - active: boolean; - lagInMb: number; - confirmedFlushLSN: string; - walStatus: string; -} - -export interface StatInfo { - pid: number; - waitEvent: string; - waitEventType: string; - queryStart: string; - query: string; - duration: number; -} - -export interface PeerSlotResponse { - slotData: SlotInfo[]; -} - -export interface PeerStatResponse { - statData: StatInfo[]; -} - -export interface SnapshotStatus { - clones: QRepMirrorStatus[]; -} - -export interface CDCMirrorStatus { - config: FlowConnectionConfigs | undefined; - snapshotStatus: SnapshotStatus | undefined; - cdcSyncs: CDCSyncStatus[]; -} - -export interface MirrorStatusResponse { - flowJobName: string; - qrepStatus?: QRepMirrorStatus | undefined; - cdcStatus?: CDCMirrorStatus | undefined; - errorMessage: string; -} - -export interface FlowStateChangeRequest { - workflowId: string; - flowJobName: string; - requestedFlowState: FlowState; -} - -export interface FlowStateChangeResponse { - ok: boolean; - errorMessage: string; -} - -export interface PeerDBVersionRequest { -} - -export interface PeerDBVersionResponse { - version: string; -} - -function createBaseCreateCDCFlowRequest(): CreateCDCFlowRequest { - return { connectionConfigs: undefined, createCatalogEntry: false }; -} - -export const CreateCDCFlowRequest = { - encode(message: CreateCDCFlowRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.connectionConfigs !== undefined) { - FlowConnectionConfigs.encode(message.connectionConfigs, writer.uint32(10).fork()).ldelim(); - } - if (message.createCatalogEntry === true) { - writer.uint32(16).bool(message.createCatalogEntry); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateCDCFlowRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateCDCFlowRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.connectionConfigs = FlowConnectionConfigs.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.createCatalogEntry = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateCDCFlowRequest { - return { - connectionConfigs: isSet(object.connectionConfigs) - ? FlowConnectionConfigs.fromJSON(object.connectionConfigs) - : undefined, - createCatalogEntry: isSet(object.createCatalogEntry) ? 
Boolean(object.createCatalogEntry) : false, - }; - }, - - toJSON(message: CreateCDCFlowRequest): unknown { - const obj: any = {}; - if (message.connectionConfigs !== undefined) { - obj.connectionConfigs = FlowConnectionConfigs.toJSON(message.connectionConfigs); - } - if (message.createCatalogEntry === true) { - obj.createCatalogEntry = message.createCatalogEntry; - } - return obj; - }, - - create, I>>(base?: I): CreateCDCFlowRequest { - return CreateCDCFlowRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateCDCFlowRequest { - const message = createBaseCreateCDCFlowRequest(); - message.connectionConfigs = (object.connectionConfigs !== undefined && object.connectionConfigs !== null) - ? FlowConnectionConfigs.fromPartial(object.connectionConfigs) - : undefined; - message.createCatalogEntry = object.createCatalogEntry ?? false; - return message; - }, -}; - -function createBaseCreateCDCFlowResponse(): CreateCDCFlowResponse { - return { worflowId: "" }; -} - -export const CreateCDCFlowResponse = { - encode(message: CreateCDCFlowResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.worflowId !== "") { - writer.uint32(10).string(message.worflowId); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateCDCFlowResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateCDCFlowResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.worflowId = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateCDCFlowResponse { - return { worflowId: isSet(object.worflowId) ? String(object.worflowId) : "" }; - }, - - toJSON(message: CreateCDCFlowResponse): unknown { - const obj: any = {}; - if (message.worflowId !== "") { - obj.worflowId = message.worflowId; - } - return obj; - }, - - create, I>>(base?: I): CreateCDCFlowResponse { - return CreateCDCFlowResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateCDCFlowResponse { - const message = createBaseCreateCDCFlowResponse(); - message.worflowId = object.worflowId ?? ""; - return message; - }, -}; - -function createBaseCreateQRepFlowRequest(): CreateQRepFlowRequest { - return { qrepConfig: undefined, createCatalogEntry: false }; -} - -export const CreateQRepFlowRequest = { - encode(message: CreateQRepFlowRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.qrepConfig !== undefined) { - QRepConfig.encode(message.qrepConfig, writer.uint32(10).fork()).ldelim(); - } - if (message.createCatalogEntry === true) { - writer.uint32(16).bool(message.createCatalogEntry); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateQRepFlowRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseCreateQRepFlowRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.qrepConfig = QRepConfig.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.createCatalogEntry = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateQRepFlowRequest { - return { - qrepConfig: isSet(object.qrepConfig) ? QRepConfig.fromJSON(object.qrepConfig) : undefined, - createCatalogEntry: isSet(object.createCatalogEntry) ? Boolean(object.createCatalogEntry) : false, - }; - }, - - toJSON(message: CreateQRepFlowRequest): unknown { - const obj: any = {}; - if (message.qrepConfig !== undefined) { - obj.qrepConfig = QRepConfig.toJSON(message.qrepConfig); - } - if (message.createCatalogEntry === true) { - obj.createCatalogEntry = message.createCatalogEntry; - } - return obj; - }, - - create, I>>(base?: I): CreateQRepFlowRequest { - return CreateQRepFlowRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateQRepFlowRequest { - const message = createBaseCreateQRepFlowRequest(); - message.qrepConfig = (object.qrepConfig !== undefined && object.qrepConfig !== null) - ? QRepConfig.fromPartial(object.qrepConfig) - : undefined; - message.createCatalogEntry = object.createCatalogEntry ?? false; - return message; - }, -}; - -function createBaseCreateQRepFlowResponse(): CreateQRepFlowResponse { - return { worflowId: "" }; -} - -export const CreateQRepFlowResponse = { - encode(message: CreateQRepFlowResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.worflowId !== "") { - writer.uint32(10).string(message.worflowId); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreateQRepFlowResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreateQRepFlowResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.worflowId = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreateQRepFlowResponse { - return { worflowId: isSet(object.worflowId) ? String(object.worflowId) : "" }; - }, - - toJSON(message: CreateQRepFlowResponse): unknown { - const obj: any = {}; - if (message.worflowId !== "") { - obj.worflowId = message.worflowId; - } - return obj; - }, - - create, I>>(base?: I): CreateQRepFlowResponse { - return CreateQRepFlowResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreateQRepFlowResponse { - const message = createBaseCreateQRepFlowResponse(); - message.worflowId = object.worflowId ?? 
""; - return message; - }, -}; - -function createBaseShutdownRequest(): ShutdownRequest { - return { workflowId: "", flowJobName: "", sourcePeer: undefined, destinationPeer: undefined, removeFlowEntry: false }; -} - -export const ShutdownRequest = { - encode(message: ShutdownRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.workflowId !== "") { - writer.uint32(10).string(message.workflowId); - } - if (message.flowJobName !== "") { - writer.uint32(18).string(message.flowJobName); - } - if (message.sourcePeer !== undefined) { - Peer.encode(message.sourcePeer, writer.uint32(26).fork()).ldelim(); - } - if (message.destinationPeer !== undefined) { - Peer.encode(message.destinationPeer, writer.uint32(34).fork()).ldelim(); - } - if (message.removeFlowEntry === true) { - writer.uint32(40).bool(message.removeFlowEntry); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ShutdownRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseShutdownRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.workflowId = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.flowJobName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.sourcePeer = Peer.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.destinationPeer = Peer.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.removeFlowEntry = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ShutdownRequest { - return { - workflowId: isSet(object.workflowId) ? String(object.workflowId) : "", - flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "", - sourcePeer: isSet(object.sourcePeer) ? Peer.fromJSON(object.sourcePeer) : undefined, - destinationPeer: isSet(object.destinationPeer) ? Peer.fromJSON(object.destinationPeer) : undefined, - removeFlowEntry: isSet(object.removeFlowEntry) ? Boolean(object.removeFlowEntry) : false, - }; - }, - - toJSON(message: ShutdownRequest): unknown { - const obj: any = {}; - if (message.workflowId !== "") { - obj.workflowId = message.workflowId; - } - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - if (message.sourcePeer !== undefined) { - obj.sourcePeer = Peer.toJSON(message.sourcePeer); - } - if (message.destinationPeer !== undefined) { - obj.destinationPeer = Peer.toJSON(message.destinationPeer); - } - if (message.removeFlowEntry === true) { - obj.removeFlowEntry = message.removeFlowEntry; - } - return obj; - }, - - create, I>>(base?: I): ShutdownRequest { - return ShutdownRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ShutdownRequest { - const message = createBaseShutdownRequest(); - message.workflowId = object.workflowId ?? ""; - message.flowJobName = object.flowJobName ?? ""; - message.sourcePeer = (object.sourcePeer !== undefined && object.sourcePeer !== null) - ? Peer.fromPartial(object.sourcePeer) - : undefined; - message.destinationPeer = (object.destinationPeer !== undefined && object.destinationPeer !== null) - ? 
Peer.fromPartial(object.destinationPeer) - : undefined; - message.removeFlowEntry = object.removeFlowEntry ?? false; - return message; - }, -}; - -function createBaseShutdownResponse(): ShutdownResponse { - return { ok: false, errorMessage: "" }; -} - -export const ShutdownResponse = { - encode(message: ShutdownResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.ok === true) { - writer.uint32(8).bool(message.ok); - } - if (message.errorMessage !== "") { - writer.uint32(18).string(message.errorMessage); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ShutdownResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseShutdownResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.ok = reader.bool(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.errorMessage = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ShutdownResponse { - return { - ok: isSet(object.ok) ? Boolean(object.ok) : false, - errorMessage: isSet(object.errorMessage) ? String(object.errorMessage) : "", - }; - }, - - toJSON(message: ShutdownResponse): unknown { - const obj: any = {}; - if (message.ok === true) { - obj.ok = message.ok; - } - if (message.errorMessage !== "") { - obj.errorMessage = message.errorMessage; - } - return obj; - }, - - create, I>>(base?: I): ShutdownResponse { - return ShutdownResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ShutdownResponse { - const message = createBaseShutdownResponse(); - message.ok = object.ok ?? false; - message.errorMessage = object.errorMessage ?? ""; - return message; - }, -}; - -function createBaseValidatePeerRequest(): ValidatePeerRequest { - return { peer: undefined }; -} - -export const ValidatePeerRequest = { - encode(message: ValidatePeerRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peer !== undefined) { - Peer.encode(message.peer, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ValidatePeerRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseValidatePeerRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peer = Peer.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ValidatePeerRequest { - return { peer: isSet(object.peer) ? Peer.fromJSON(object.peer) : undefined }; - }, - - toJSON(message: ValidatePeerRequest): unknown { - const obj: any = {}; - if (message.peer !== undefined) { - obj.peer = Peer.toJSON(message.peer); - } - return obj; - }, - - create, I>>(base?: I): ValidatePeerRequest { - return ValidatePeerRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): ValidatePeerRequest { - const message = createBaseValidatePeerRequest(); - message.peer = (object.peer !== undefined && object.peer !== null) ? 
Peer.fromPartial(object.peer) : undefined; - return message; - }, -}; - -function createBaseCreatePeerRequest(): CreatePeerRequest { - return { peer: undefined }; -} - -export const CreatePeerRequest = { - encode(message: CreatePeerRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peer !== undefined) { - Peer.encode(message.peer, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreatePeerRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreatePeerRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peer = Peer.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreatePeerRequest { - return { peer: isSet(object.peer) ? Peer.fromJSON(object.peer) : undefined }; - }, - - toJSON(message: CreatePeerRequest): unknown { - const obj: any = {}; - if (message.peer !== undefined) { - obj.peer = Peer.toJSON(message.peer); - } - return obj; - }, - - create, I>>(base?: I): CreatePeerRequest { - return CreatePeerRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreatePeerRequest { - const message = createBaseCreatePeerRequest(); - message.peer = (object.peer !== undefined && object.peer !== null) ? Peer.fromPartial(object.peer) : undefined; - return message; - }, -}; - -function createBaseDropPeerRequest(): DropPeerRequest { - return { peerName: "" }; -} - -export const DropPeerRequest = { - encode(message: DropPeerRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerName !== "") { - writer.uint32(10).string(message.peerName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DropPeerRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDropPeerRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DropPeerRequest { - return { peerName: isSet(object.peerName) ? String(object.peerName) : "" }; - }, - - toJSON(message: DropPeerRequest): unknown { - const obj: any = {}; - if (message.peerName !== "") { - obj.peerName = message.peerName; - } - return obj; - }, - - create, I>>(base?: I): DropPeerRequest { - return DropPeerRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): DropPeerRequest { - const message = createBaseDropPeerRequest(); - message.peerName = object.peerName ?? 
""; - return message; - }, -}; - -function createBaseDropPeerResponse(): DropPeerResponse { - return { ok: false, errorMessage: "" }; -} - -export const DropPeerResponse = { - encode(message: DropPeerResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.ok === true) { - writer.uint32(8).bool(message.ok); - } - if (message.errorMessage !== "") { - writer.uint32(18).string(message.errorMessage); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DropPeerResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDropPeerResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.ok = reader.bool(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.errorMessage = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DropPeerResponse { - return { - ok: isSet(object.ok) ? Boolean(object.ok) : false, - errorMessage: isSet(object.errorMessage) ? String(object.errorMessage) : "", - }; - }, - - toJSON(message: DropPeerResponse): unknown { - const obj: any = {}; - if (message.ok === true) { - obj.ok = message.ok; - } - if (message.errorMessage !== "") { - obj.errorMessage = message.errorMessage; - } - return obj; - }, - - create, I>>(base?: I): DropPeerResponse { - return DropPeerResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): DropPeerResponse { - const message = createBaseDropPeerResponse(); - message.ok = object.ok ?? false; - message.errorMessage = object.errorMessage ?? ""; - return message; - }, -}; - -function createBaseValidatePeerResponse(): ValidatePeerResponse { - return { status: 0, message: "" }; -} - -export const ValidatePeerResponse = { - encode(message: ValidatePeerResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.status !== 0) { - writer.uint32(8).int32(message.status); - } - if (message.message !== "") { - writer.uint32(18).string(message.message); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ValidatePeerResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseValidatePeerResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.status = reader.int32() as any; - continue; - case 2: - if (tag !== 18) { - break; - } - - message.message = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ValidatePeerResponse { - return { - status: isSet(object.status) ? validatePeerStatusFromJSON(object.status) : 0, - message: isSet(object.message) ? String(object.message) : "", - }; - }, - - toJSON(message: ValidatePeerResponse): unknown { - const obj: any = {}; - if (message.status !== 0) { - obj.status = validatePeerStatusToJSON(message.status); - } - if (message.message !== "") { - obj.message = message.message; - } - return obj; - }, - - create, I>>(base?: I): ValidatePeerResponse { - return ValidatePeerResponse.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): ValidatePeerResponse { - const message = createBaseValidatePeerResponse(); - message.status = object.status ?? 0; - message.message = object.message ?? ""; - return message; - }, -}; - -function createBaseCreatePeerResponse(): CreatePeerResponse { - return { status: 0, message: "" }; -} - -export const CreatePeerResponse = { - encode(message: CreatePeerResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.status !== 0) { - writer.uint32(8).int32(message.status); - } - if (message.message !== "") { - writer.uint32(18).string(message.message); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CreatePeerResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCreatePeerResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.status = reader.int32() as any; - continue; - case 2: - if (tag !== 18) { - break; - } - - message.message = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CreatePeerResponse { - return { - status: isSet(object.status) ? createPeerStatusFromJSON(object.status) : 0, - message: isSet(object.message) ? String(object.message) : "", - }; - }, - - toJSON(message: CreatePeerResponse): unknown { - const obj: any = {}; - if (message.status !== 0) { - obj.status = createPeerStatusToJSON(message.status); - } - if (message.message !== "") { - obj.message = message.message; - } - return obj; - }, - - create, I>>(base?: I): CreatePeerResponse { - return CreatePeerResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CreatePeerResponse { - const message = createBaseCreatePeerResponse(); - message.status = object.status ?? 0; - message.message = object.message ?? ""; - return message; - }, -}; - -function createBaseMirrorStatusRequest(): MirrorStatusRequest { - return { flowJobName: "" }; -} - -export const MirrorStatusRequest = { - encode(message: MirrorStatusRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.flowJobName !== "") { - writer.uint32(10).string(message.flowJobName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MirrorStatusRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMirrorStatusRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.flowJobName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MirrorStatusRequest { - return { flowJobName: isSet(object.flowJobName) ? String(object.flowJobName) : "" }; - }, - - toJSON(message: MirrorStatusRequest): unknown { - const obj: any = {}; - if (message.flowJobName !== "") { - obj.flowJobName = message.flowJobName; - } - return obj; - }, - - create, I>>(base?: I): MirrorStatusRequest { - return MirrorStatusRequest.fromPartial(base ?? 
({} as any)); - }, - fromPartial, I>>(object: I): MirrorStatusRequest { - const message = createBaseMirrorStatusRequest(); - message.flowJobName = object.flowJobName ?? ""; - return message; - }, -}; - -function createBasePartitionStatus(): PartitionStatus { - return { partitionId: "", startTime: undefined, endTime: undefined, numRows: 0 }; -} - -export const PartitionStatus = { - encode(message: PartitionStatus, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.partitionId !== "") { - writer.uint32(10).string(message.partitionId); - } - if (message.startTime !== undefined) { - Timestamp.encode(toTimestamp(message.startTime), writer.uint32(18).fork()).ldelim(); - } - if (message.endTime !== undefined) { - Timestamp.encode(toTimestamp(message.endTime), writer.uint32(26).fork()).ldelim(); - } - if (message.numRows !== 0) { - writer.uint32(32).int32(message.numRows); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PartitionStatus { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePartitionStatus(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.partitionId = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.startTime = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.endTime = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.numRows = reader.int32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PartitionStatus { - return { - partitionId: isSet(object.partitionId) ? String(object.partitionId) : "", - startTime: isSet(object.startTime) ? fromJsonTimestamp(object.startTime) : undefined, - endTime: isSet(object.endTime) ? fromJsonTimestamp(object.endTime) : undefined, - numRows: isSet(object.numRows) ? Number(object.numRows) : 0, - }; - }, - - toJSON(message: PartitionStatus): unknown { - const obj: any = {}; - if (message.partitionId !== "") { - obj.partitionId = message.partitionId; - } - if (message.startTime !== undefined) { - obj.startTime = message.startTime.toISOString(); - } - if (message.endTime !== undefined) { - obj.endTime = message.endTime.toISOString(); - } - if (message.numRows !== 0) { - obj.numRows = Math.round(message.numRows); - } - return obj; - }, - - create, I>>(base?: I): PartitionStatus { - return PartitionStatus.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PartitionStatus { - const message = createBasePartitionStatus(); - message.partitionId = object.partitionId ?? ""; - message.startTime = object.startTime ?? undefined; - message.endTime = object.endTime ?? undefined; - message.numRows = object.numRows ?? 
0; - return message; - }, -}; - -function createBaseQRepMirrorStatus(): QRepMirrorStatus { - return { config: undefined, partitions: [] }; -} - -export const QRepMirrorStatus = { - encode(message: QRepMirrorStatus, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.config !== undefined) { - QRepConfig.encode(message.config, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.partitions) { - PartitionStatus.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QRepMirrorStatus { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQRepMirrorStatus(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.config = QRepConfig.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.partitions.push(PartitionStatus.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QRepMirrorStatus { - return { - config: isSet(object.config) ? QRepConfig.fromJSON(object.config) : undefined, - partitions: Array.isArray(object?.partitions) - ? object.partitions.map((e: any) => PartitionStatus.fromJSON(e)) - : [], - }; - }, - - toJSON(message: QRepMirrorStatus): unknown { - const obj: any = {}; - if (message.config !== undefined) { - obj.config = QRepConfig.toJSON(message.config); - } - if (message.partitions?.length) { - obj.partitions = message.partitions.map((e) => PartitionStatus.toJSON(e)); - } - return obj; - }, - - create, I>>(base?: I): QRepMirrorStatus { - return QRepMirrorStatus.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): QRepMirrorStatus { - const message = createBaseQRepMirrorStatus(); - message.config = (object.config !== undefined && object.config !== null) - ? QRepConfig.fromPartial(object.config) - : undefined; - message.partitions = object.partitions?.map((e) => PartitionStatus.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseCDCSyncStatus(): CDCSyncStatus { - return { startLsn: 0, endLsn: 0, numRows: 0, startTime: undefined, endTime: undefined }; -} - -export const CDCSyncStatus = { - encode(message: CDCSyncStatus, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.startLsn !== 0) { - writer.uint32(8).int64(message.startLsn); - } - if (message.endLsn !== 0) { - writer.uint32(16).int64(message.endLsn); - } - if (message.numRows !== 0) { - writer.uint32(24).int32(message.numRows); - } - if (message.startTime !== undefined) { - Timestamp.encode(toTimestamp(message.startTime), writer.uint32(34).fork()).ldelim(); - } - if (message.endTime !== undefined) { - Timestamp.encode(toTimestamp(message.endTime), writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CDCSyncStatus { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseCDCSyncStatus(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.startLsn = longToNumber(reader.int64() as Long); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.endLsn = longToNumber(reader.int64() as Long); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.numRows = reader.int32(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.startTime = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.endTime = fromTimestamp(Timestamp.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CDCSyncStatus { - return { - startLsn: isSet(object.startLsn) ? Number(object.startLsn) : 0, - endLsn: isSet(object.endLsn) ? Number(object.endLsn) : 0, - numRows: isSet(object.numRows) ? Number(object.numRows) : 0, - startTime: isSet(object.startTime) ? fromJsonTimestamp(object.startTime) : undefined, - endTime: isSet(object.endTime) ? fromJsonTimestamp(object.endTime) : undefined, - }; - }, - - toJSON(message: CDCSyncStatus): unknown { - const obj: any = {}; - if (message.startLsn !== 0) { - obj.startLsn = Math.round(message.startLsn); - } - if (message.endLsn !== 0) { - obj.endLsn = Math.round(message.endLsn); - } - if (message.numRows !== 0) { - obj.numRows = Math.round(message.numRows); - } - if (message.startTime !== undefined) { - obj.startTime = message.startTime.toISOString(); - } - if (message.endTime !== undefined) { - obj.endTime = message.endTime.toISOString(); - } - return obj; - }, - - create, I>>(base?: I): CDCSyncStatus { - return CDCSyncStatus.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): CDCSyncStatus { - const message = createBaseCDCSyncStatus(); - message.startLsn = object.startLsn ?? 0; - message.endLsn = object.endLsn ?? 0; - message.numRows = object.numRows ?? 0; - message.startTime = object.startTime ?? undefined; - message.endTime = object.endTime ?? undefined; - return message; - }, -}; - -function createBasePeerSchemasResponse(): PeerSchemasResponse { - return { schemas: [] }; -} - -export const PeerSchemasResponse = { - encode(message: PeerSchemasResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.schemas) { - writer.uint32(10).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PeerSchemasResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePeerSchemasResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.schemas.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PeerSchemasResponse { - return { schemas: Array.isArray(object?.schemas) ? 
object.schemas.map((e: any) => String(e)) : [] }; - }, - - toJSON(message: PeerSchemasResponse): unknown { - const obj: any = {}; - if (message.schemas?.length) { - obj.schemas = message.schemas; - } - return obj; - }, - - create, I>>(base?: I): PeerSchemasResponse { - return PeerSchemasResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): PeerSchemasResponse { - const message = createBasePeerSchemasResponse(); - message.schemas = object.schemas?.map((e) => e) || []; - return message; - }, -}; - -function createBaseSchemaTablesRequest(): SchemaTablesRequest { - return { peerName: "", schemaName: "" }; -} - -export const SchemaTablesRequest = { - encode(message: SchemaTablesRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerName !== "") { - writer.uint32(10).string(message.peerName); - } - if (message.schemaName !== "") { - writer.uint32(18).string(message.schemaName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SchemaTablesRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSchemaTablesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.schemaName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SchemaTablesRequest { - return { - peerName: isSet(object.peerName) ? String(object.peerName) : "", - schemaName: isSet(object.schemaName) ? String(object.schemaName) : "", - }; - }, - - toJSON(message: SchemaTablesRequest): unknown { - const obj: any = {}; - if (message.peerName !== "") { - obj.peerName = message.peerName; - } - if (message.schemaName !== "") { - obj.schemaName = message.schemaName; - } - return obj; - }, - - create, I>>(base?: I): SchemaTablesRequest { - return SchemaTablesRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SchemaTablesRequest { - const message = createBaseSchemaTablesRequest(); - message.peerName = object.peerName ?? ""; - message.schemaName = object.schemaName ?? ""; - return message; - }, -}; - -function createBaseSchemaTablesResponse(): SchemaTablesResponse { - return { tables: [] }; -} - -export const SchemaTablesResponse = { - encode(message: SchemaTablesResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.tables) { - writer.uint32(10).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SchemaTablesResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSchemaTablesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tables.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SchemaTablesResponse { - return { tables: Array.isArray(object?.tables) ? 
object.tables.map((e: any) => String(e)) : [] }; - }, - - toJSON(message: SchemaTablesResponse): unknown { - const obj: any = {}; - if (message.tables?.length) { - obj.tables = message.tables; - } - return obj; - }, - - create, I>>(base?: I): SchemaTablesResponse { - return SchemaTablesResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SchemaTablesResponse { - const message = createBaseSchemaTablesResponse(); - message.tables = object.tables?.map((e) => e) || []; - return message; - }, -}; - -function createBaseAllTablesResponse(): AllTablesResponse { - return { tables: [] }; -} - -export const AllTablesResponse = { - encode(message: AllTablesResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.tables) { - writer.uint32(10).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AllTablesResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAllTablesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.tables.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AllTablesResponse { - return { tables: Array.isArray(object?.tables) ? object.tables.map((e: any) => String(e)) : [] }; - }, - - toJSON(message: AllTablesResponse): unknown { - const obj: any = {}; - if (message.tables?.length) { - obj.tables = message.tables; - } - return obj; - }, - - create, I>>(base?: I): AllTablesResponse { - return AllTablesResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): AllTablesResponse { - const message = createBaseAllTablesResponse(); - message.tables = object.tables?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTableColumnsRequest(): TableColumnsRequest { - return { peerName: "", schemaName: "", tableName: "" }; -} - -export const TableColumnsRequest = { - encode(message: TableColumnsRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerName !== "") { - writer.uint32(10).string(message.peerName); - } - if (message.schemaName !== "") { - writer.uint32(18).string(message.schemaName); - } - if (message.tableName !== "") { - writer.uint32(26).string(message.tableName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableColumnsRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableColumnsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.schemaName = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.tableName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableColumnsRequest { - return { - peerName: isSet(object.peerName) ? String(object.peerName) : "", - schemaName: isSet(object.schemaName) ? 
String(object.schemaName) : "", - tableName: isSet(object.tableName) ? String(object.tableName) : "", - }; - }, - - toJSON(message: TableColumnsRequest): unknown { - const obj: any = {}; - if (message.peerName !== "") { - obj.peerName = message.peerName; - } - if (message.schemaName !== "") { - obj.schemaName = message.schemaName; - } - if (message.tableName !== "") { - obj.tableName = message.tableName; - } - return obj; - }, - - create, I>>(base?: I): TableColumnsRequest { - return TableColumnsRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableColumnsRequest { - const message = createBaseTableColumnsRequest(); - message.peerName = object.peerName ?? ""; - message.schemaName = object.schemaName ?? ""; - message.tableName = object.tableName ?? ""; - return message; - }, -}; - -function createBaseTableColumnsResponse(): TableColumnsResponse { - return { columns: [] }; -} - -export const TableColumnsResponse = { - encode(message: TableColumnsResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - for (const v of message.columns) { - writer.uint32(10).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): TableColumnsResponse { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTableColumnsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.columns.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): TableColumnsResponse { - return { columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => String(e)) : [] }; - }, - - toJSON(message: TableColumnsResponse): unknown { - const obj: any = {}; - if (message.columns?.length) { - obj.columns = message.columns; - } - return obj; - }, - - create, I>>(base?: I): TableColumnsResponse { - return TableColumnsResponse.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): TableColumnsResponse { - const message = createBaseTableColumnsResponse(); - message.columns = object.columns?.map((e) => e) || []; - return message; - }, -}; - -function createBasePostgresPeerActivityInfoRequest(): PostgresPeerActivityInfoRequest { - return { peerName: "" }; -} - -export const PostgresPeerActivityInfoRequest = { - encode(message: PostgresPeerActivityInfoRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.peerName !== "") { - writer.uint32(10).string(message.peerName); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PostgresPeerActivityInfoRequest { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePostgresPeerActivityInfoRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.peerName = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PostgresPeerActivityInfoRequest { - return { peerName: isSet(object.peerName) ? 
String(object.peerName) : "" }; - }, - - toJSON(message: PostgresPeerActivityInfoRequest): unknown { - const obj: any = {}; - if (message.peerName !== "") { - obj.peerName = message.peerName; - } - return obj; - }, - - create, I>>(base?: I): PostgresPeerActivityInfoRequest { - return PostgresPeerActivityInfoRequest.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>( - object: I, - ): PostgresPeerActivityInfoRequest { - const message = createBasePostgresPeerActivityInfoRequest(); - message.peerName = object.peerName ?? ""; - return message; - }, -}; - -function createBaseSlotInfo(): SlotInfo { - return { slotName: "", redoLSN: "", restartLSN: "", active: false, lagInMb: 0, confirmedFlushLSN: "", walStatus: "" }; -} - -export const SlotInfo = { - encode(message: SlotInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.slotName !== "") { - writer.uint32(10).string(message.slotName); - } - if (message.redoLSN !== "") { - writer.uint32(18).string(message.redoLSN); - } - if (message.restartLSN !== "") { - writer.uint32(26).string(message.restartLSN); - } - if (message.active === true) { - writer.uint32(32).bool(message.active); - } - if (message.lagInMb !== 0) { - writer.uint32(45).float(message.lagInMb); - } - if (message.confirmedFlushLSN !== "") { - writer.uint32(50).string(message.confirmedFlushLSN); - } - if (message.walStatus !== "") { - writer.uint32(58).string(message.walStatus); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SlotInfo { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSlotInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.slotName = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.redoLSN = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.restartLSN = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.active = reader.bool(); - continue; - case 5: - if (tag !== 45) { - break; - } - - message.lagInMb = reader.float(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.confirmedFlushLSN = reader.string(); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.walStatus = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SlotInfo { - return { - slotName: isSet(object.slotName) ? String(object.slotName) : "", - redoLSN: isSet(object.redoLSN) ? String(object.redoLSN) : "", - restartLSN: isSet(object.restartLSN) ? String(object.restartLSN) : "", - active: isSet(object.active) ? Boolean(object.active) : false, - lagInMb: isSet(object.lagInMb) ? Number(object.lagInMb) : 0, - confirmedFlushLSN: isSet(object.confirmedFlushLSN) ? String(object.confirmedFlushLSN) : "", - walStatus: isSet(object.walStatus) ? 
String(object.walStatus) : "", - }; - }, - - toJSON(message: SlotInfo): unknown { - const obj: any = {}; - if (message.slotName !== "") { - obj.slotName = message.slotName; - } - if (message.redoLSN !== "") { - obj.redoLSN = message.redoLSN; - } - if (message.restartLSN !== "") { - obj.restartLSN = message.restartLSN; - } - if (message.active === true) { - obj.active = message.active; - } - if (message.lagInMb !== 0) { - obj.lagInMb = message.lagInMb; - } - if (message.confirmedFlushLSN !== "") { - obj.confirmedFlushLSN = message.confirmedFlushLSN; - } - if (message.walStatus !== "") { - obj.walStatus = message.walStatus; - } - return obj; - }, - - create, I>>(base?: I): SlotInfo { - return SlotInfo.fromPartial(base ?? ({} as any)); - }, - fromPartial, I>>(object: I): SlotInfo { - const message = createBaseSlotInfo(); - message.slotName = object.slotName ?? ""; - message.redoLSN = object.redoLSN ?? ""; - message.restartLSN = object.restartLSN ?? ""; - message.active = object.active ?? false; - message.lagInMb = object.lagInMb ?? 0; - message.confirmedFlushLSN = object.confirmedFlushLSN ?? ""; - message.walStatus = object.walStatus ?? ""; - return message; - }, -}; - -function createBaseStatInfo(): StatInfo { - return { pid: 0, waitEvent: "", waitEventType: "", queryStart: "", query: "", duration: 0 }; -} - -export const StatInfo = { - encode(message: StatInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.pid !== 0) { - writer.uint32(8).int64(message.pid); - } - if (message.waitEvent !== "") { - writer.uint32(18).string(message.waitEvent); - } - if (message.waitEventType !== "") { - writer.uint32(26).string(message.waitEventType); - } - if (message.queryStart !== "") { - writer.uint32(34).string(message.queryStart); - } - if (message.query !== "") { - writer.uint32(42).string(message.query); - } - if (message.duration !== 0) { - writer.uint32(53).float(message.duration); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): StatInfo { - const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStatInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.pid = longToNumber(reader.int64() as Long); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.waitEvent = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.waitEventType = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.queryStart = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.query = reader.string(); - continue; - case 6: - if (tag !== 53) { - break; - } - - message.duration = reader.float(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): StatInfo { - return { - pid: isSet(object.pid) ? Number(object.pid) : 0, - waitEvent: isSet(object.waitEvent) ? String(object.waitEvent) : "", - waitEventType: isSet(object.waitEventType) ? String(object.waitEventType) : "", - queryStart: isSet(object.queryStart) ? String(object.queryStart) : "", - query: isSet(object.query) ? String(object.query) : "", - duration: isSet(object.duration) ? 
[Hunk continues: the remaining deleted ts-proto message codecs, StatInfo, PeerSlotResponse, PeerStatResponse, SnapshotStatus, CDCMirrorStatus, MirrorStatusResponse, FlowStateChangeRequest, FlowStateChangeResponse, PeerDBVersionRequest, and PeerDBVersionResponse. Each exposes the same surface: encode/decode against protobufjs/minimal writer and reader field tags with unknown-field skipping, fromJSON/toJSON that omit default-valued fields, and create/fromPartial constrained by Exact<DeepPartial<...>> that fill proto3 defaults ("", 0, false, []) for anything left unset.]
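Every codec removed above has the same call surface, so one round-trip illustrates all of them. A minimal sketch using FlowStateChangeRequest; the import path is hypothetical (the generated module's location is not shown in this diff), and the pattern is standard ts-proto usage rather than anything specific to this change:

// Hypothetical import path for the generated module this diff removes.
import { FlowStateChangeRequest } from "./grpc_generated/route";

// fromPartial fills proto3 defaults for anything left out (requestedFlowState -> 0).
const req = FlowStateChangeRequest.fromPartial({
  workflowId: "wf-123",
  flowJobName: "my_mirror",
});

// Binary round-trip via the protobufjs/minimal writer and reader.
const bytes: Uint8Array = FlowStateChangeRequest.encode(req).finish();
const decoded = FlowStateChangeRequest.decode(bytes);

// JSON round-trip; toJSON omits fields that still hold their default value.
const asJson = FlowStateChangeRequest.toJSON(decoded);
const back = FlowStateChangeRequest.fromJSON(asJson);

console.log(decoded.flowJobName, back.workflowId); // "my_mirror" "wf-123"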
""; - return message; - }, -}; - -export type FlowServiceService = typeof FlowServiceService; -export const FlowServiceService = { - validatePeer: { - path: "/peerdb_route.FlowService/ValidatePeer", - requestStream: false, - responseStream: false, - requestSerialize: (value: ValidatePeerRequest) => Buffer.from(ValidatePeerRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ValidatePeerRequest.decode(value), - responseSerialize: (value: ValidatePeerResponse) => Buffer.from(ValidatePeerResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ValidatePeerResponse.decode(value), - }, - createPeer: { - path: "/peerdb_route.FlowService/CreatePeer", - requestStream: false, - responseStream: false, - requestSerialize: (value: CreatePeerRequest) => Buffer.from(CreatePeerRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => CreatePeerRequest.decode(value), - responseSerialize: (value: CreatePeerResponse) => Buffer.from(CreatePeerResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => CreatePeerResponse.decode(value), - }, - dropPeer: { - path: "/peerdb_route.FlowService/DropPeer", - requestStream: false, - responseStream: false, - requestSerialize: (value: DropPeerRequest) => Buffer.from(DropPeerRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => DropPeerRequest.decode(value), - responseSerialize: (value: DropPeerResponse) => Buffer.from(DropPeerResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => DropPeerResponse.decode(value), - }, - createCdcFlow: { - path: "/peerdb_route.FlowService/CreateCDCFlow", - requestStream: false, - responseStream: false, - requestSerialize: (value: CreateCDCFlowRequest) => Buffer.from(CreateCDCFlowRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => CreateCDCFlowRequest.decode(value), - responseSerialize: (value: CreateCDCFlowResponse) => Buffer.from(CreateCDCFlowResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => CreateCDCFlowResponse.decode(value), - }, - createQRepFlow: { - path: "/peerdb_route.FlowService/CreateQRepFlow", - requestStream: false, - responseStream: false, - requestSerialize: (value: CreateQRepFlowRequest) => Buffer.from(CreateQRepFlowRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => CreateQRepFlowRequest.decode(value), - responseSerialize: (value: CreateQRepFlowResponse) => Buffer.from(CreateQRepFlowResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => CreateQRepFlowResponse.decode(value), - }, - getSchemas: { - path: "/peerdb_route.FlowService/GetSchemas", - requestStream: false, - responseStream: false, - requestSerialize: (value: PostgresPeerActivityInfoRequest) => - Buffer.from(PostgresPeerActivityInfoRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => PostgresPeerActivityInfoRequest.decode(value), - responseSerialize: (value: PeerSchemasResponse) => Buffer.from(PeerSchemasResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => PeerSchemasResponse.decode(value), - }, - getTablesInSchema: { - path: "/peerdb_route.FlowService/GetTablesInSchema", - requestStream: false, - responseStream: false, - requestSerialize: (value: SchemaTablesRequest) => Buffer.from(SchemaTablesRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => SchemaTablesRequest.decode(value), - responseSerialize: (value: SchemaTablesResponse) => Buffer.from(SchemaTablesResponse.encode(value).finish()), - 
responseDeserialize: (value: Buffer) => SchemaTablesResponse.decode(value), - }, - getAllTables: { - path: "/peerdb_route.FlowService/GetAllTables", - requestStream: false, - responseStream: false, - requestSerialize: (value: PostgresPeerActivityInfoRequest) => - Buffer.from(PostgresPeerActivityInfoRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => PostgresPeerActivityInfoRequest.decode(value), - responseSerialize: (value: AllTablesResponse) => Buffer.from(AllTablesResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => AllTablesResponse.decode(value), - }, - getColumns: { - path: "/peerdb_route.FlowService/GetColumns", - requestStream: false, - responseStream: false, - requestSerialize: (value: TableColumnsRequest) => Buffer.from(TableColumnsRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => TableColumnsRequest.decode(value), - responseSerialize: (value: TableColumnsResponse) => Buffer.from(TableColumnsResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => TableColumnsResponse.decode(value), - }, - getSlotInfo: { - path: "/peerdb_route.FlowService/GetSlotInfo", - requestStream: false, - responseStream: false, - requestSerialize: (value: PostgresPeerActivityInfoRequest) => - Buffer.from(PostgresPeerActivityInfoRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => PostgresPeerActivityInfoRequest.decode(value), - responseSerialize: (value: PeerSlotResponse) => Buffer.from(PeerSlotResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => PeerSlotResponse.decode(value), - }, - getStatInfo: { - path: "/peerdb_route.FlowService/GetStatInfo", - requestStream: false, - responseStream: false, - requestSerialize: (value: PostgresPeerActivityInfoRequest) => - Buffer.from(PostgresPeerActivityInfoRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => PostgresPeerActivityInfoRequest.decode(value), - responseSerialize: (value: PeerStatResponse) => Buffer.from(PeerStatResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => PeerStatResponse.decode(value), - }, - shutdownFlow: { - path: "/peerdb_route.FlowService/ShutdownFlow", - requestStream: false, - responseStream: false, - requestSerialize: (value: ShutdownRequest) => Buffer.from(ShutdownRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ShutdownRequest.decode(value), - responseSerialize: (value: ShutdownResponse) => Buffer.from(ShutdownResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ShutdownResponse.decode(value), - }, - flowStateChange: { - path: "/peerdb_route.FlowService/FlowStateChange", - requestStream: false, - responseStream: false, - requestSerialize: (value: FlowStateChangeRequest) => Buffer.from(FlowStateChangeRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => FlowStateChangeRequest.decode(value), - responseSerialize: (value: FlowStateChangeResponse) => Buffer.from(FlowStateChangeResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => FlowStateChangeResponse.decode(value), - }, - mirrorStatus: { - path: "/peerdb_route.FlowService/MirrorStatus", - requestStream: false, - responseStream: false, - requestSerialize: (value: MirrorStatusRequest) => Buffer.from(MirrorStatusRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => MirrorStatusRequest.decode(value), - responseSerialize: (value: MirrorStatusResponse) => Buffer.from(MirrorStatusResponse.encode(value).finish()), - 
responseDeserialize: (value: Buffer) => MirrorStatusResponse.decode(value), - }, - getVersion: { - path: "/peerdb_route.FlowService/GetVersion", - requestStream: false, - responseStream: false, - requestSerialize: (value: PeerDBVersionRequest) => Buffer.from(PeerDBVersionRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => PeerDBVersionRequest.decode(value), - responseSerialize: (value: PeerDBVersionResponse) => Buffer.from(PeerDBVersionResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => PeerDBVersionResponse.decode(value), - }, -} as const; - -export interface FlowServiceServer extends UntypedServiceImplementation { - validatePeer: handleUnaryCall; - createPeer: handleUnaryCall; - dropPeer: handleUnaryCall; - createCdcFlow: handleUnaryCall; - createQRepFlow: handleUnaryCall; - getSchemas: handleUnaryCall; - getTablesInSchema: handleUnaryCall; - getAllTables: handleUnaryCall; - getColumns: handleUnaryCall; - getSlotInfo: handleUnaryCall; - getStatInfo: handleUnaryCall; - shutdownFlow: handleUnaryCall; - flowStateChange: handleUnaryCall; - mirrorStatus: handleUnaryCall; - getVersion: handleUnaryCall; -} - -export interface FlowServiceClient extends Client { - validatePeer( - request: ValidatePeerRequest, - callback: (error: ServiceError | null, response: ValidatePeerResponse) => void, - ): ClientUnaryCall; - validatePeer( - request: ValidatePeerRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: ValidatePeerResponse) => void, - ): ClientUnaryCall; - validatePeer( - request: ValidatePeerRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: ValidatePeerResponse) => void, - ): ClientUnaryCall; - createPeer( - request: CreatePeerRequest, - callback: (error: ServiceError | null, response: CreatePeerResponse) => void, - ): ClientUnaryCall; - createPeer( - request: CreatePeerRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: CreatePeerResponse) => void, - ): ClientUnaryCall; - createPeer( - request: CreatePeerRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: CreatePeerResponse) => void, - ): ClientUnaryCall; - dropPeer( - request: DropPeerRequest, - callback: (error: ServiceError | null, response: DropPeerResponse) => void, - ): ClientUnaryCall; - dropPeer( - request: DropPeerRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: DropPeerResponse) => void, - ): ClientUnaryCall; - dropPeer( - request: DropPeerRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: DropPeerResponse) => void, - ): ClientUnaryCall; - createCdcFlow( - request: CreateCDCFlowRequest, - callback: (error: ServiceError | null, response: CreateCDCFlowResponse) => void, - ): ClientUnaryCall; - createCdcFlow( - request: CreateCDCFlowRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: CreateCDCFlowResponse) => void, - ): ClientUnaryCall; - createCdcFlow( - request: CreateCDCFlowRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: CreateCDCFlowResponse) => void, - ): ClientUnaryCall; - createQRepFlow( - request: CreateQRepFlowRequest, - callback: (error: ServiceError | null, response: CreateQRepFlowResponse) => void, - ): ClientUnaryCall; - createQRepFlow( - request: CreateQRepFlowRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: 
CreateQRepFlowResponse) => void, - ): ClientUnaryCall; - createQRepFlow( - request: CreateQRepFlowRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: CreateQRepFlowResponse) => void, - ): ClientUnaryCall; - getSchemas( - request: PostgresPeerActivityInfoRequest, - callback: (error: ServiceError | null, response: PeerSchemasResponse) => void, - ): ClientUnaryCall; - getSchemas( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: PeerSchemasResponse) => void, - ): ClientUnaryCall; - getSchemas( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: PeerSchemasResponse) => void, - ): ClientUnaryCall; - getTablesInSchema( - request: SchemaTablesRequest, - callback: (error: ServiceError | null, response: SchemaTablesResponse) => void, - ): ClientUnaryCall; - getTablesInSchema( - request: SchemaTablesRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: SchemaTablesResponse) => void, - ): ClientUnaryCall; - getTablesInSchema( - request: SchemaTablesRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: SchemaTablesResponse) => void, - ): ClientUnaryCall; - getAllTables( - request: PostgresPeerActivityInfoRequest, - callback: (error: ServiceError | null, response: AllTablesResponse) => void, - ): ClientUnaryCall; - getAllTables( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: AllTablesResponse) => void, - ): ClientUnaryCall; - getAllTables( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: AllTablesResponse) => void, - ): ClientUnaryCall; - getColumns( - request: TableColumnsRequest, - callback: (error: ServiceError | null, response: TableColumnsResponse) => void, - ): ClientUnaryCall; - getColumns( - request: TableColumnsRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: TableColumnsResponse) => void, - ): ClientUnaryCall; - getColumns( - request: TableColumnsRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: TableColumnsResponse) => void, - ): ClientUnaryCall; - getSlotInfo( - request: PostgresPeerActivityInfoRequest, - callback: (error: ServiceError | null, response: PeerSlotResponse) => void, - ): ClientUnaryCall; - getSlotInfo( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: PeerSlotResponse) => void, - ): ClientUnaryCall; - getSlotInfo( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: PeerSlotResponse) => void, - ): ClientUnaryCall; - getStatInfo( - request: PostgresPeerActivityInfoRequest, - callback: (error: ServiceError | null, response: PeerStatResponse) => void, - ): ClientUnaryCall; - getStatInfo( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - callback: (error: ServiceError | null, response: PeerStatResponse) => void, - ): ClientUnaryCall; - getStatInfo( - request: PostgresPeerActivityInfoRequest, - metadata: Metadata, - options: Partial, - callback: (error: ServiceError | null, response: PeerStatResponse) => void, - ): ClientUnaryCall; - shutdownFlow( - request: ShutdownRequest, - callback: (error: 
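The summarized client interface is consumed like any grpc-js unary client. A minimal sketch of calling GetVersion, assuming a hypothetical import path and server address (neither appears in this diff):

import { credentials } from "@grpc/grpc-js";
// Hypothetical import path for the generated module this diff removes.
import { FlowServiceClient, PeerDBVersionRequest } from "./grpc_generated/route";

// The address is illustrative only.
const client = new FlowServiceClient("localhost:8112", credentials.createInsecure());

// Unary call: the callback receives either a ServiceError or a PeerDBVersionResponse.
client.getVersion(PeerDBVersionRequest.fromPartial({}), (err, res) => {
  if (err) {
    console.error("GetVersion failed:", err.message);
    return;
  }
  console.log("PeerDB version:", res.version);
});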
[The remaining FlowServiceClient overloads (shutdownFlow, flowStateChange, mirrorStatus, getVersion) follow the same three-overload pattern, after which the deleted file closed with the client constructor and the shared ts-proto helpers:]
-export const FlowServiceClient = makeGenericClientConstructor(
-  FlowServiceService,
-  "peerdb_route.FlowService",
-) as unknown as {
-  new (address: string, credentials: ChannelCredentials, options?: Partial<ClientOptions>): FlowServiceClient;
-  service: typeof FlowServiceService;
-};
-
-declare const self: any | undefined;
-declare const window: any | undefined;
-declare const global: any | undefined;
-const tsProtoGlobalThis: any = (() => {
-  if (typeof globalThis !== "undefined") {
-    return globalThis;
-  }
-  if (typeof self !== "undefined") {
-    return self;
-  }
-  if (typeof window !== "undefined") {
-    return window;
-  }
-  if (typeof global !== "undefined") {
-    return global;
-  }
-  throw "Unable to locate global object";
-})();
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function toTimestamp(date: Date): Timestamp {
-  const seconds = date.getTime() / 1_000;
-  const nanos = (date.getTime() % 1_000) * 1_000_000;
-  return { seconds, nanos };
-}
-
-function fromTimestamp(t: Timestamp): Date {
-  let millis = (t.seconds || 0) * 1_000;
-  millis += (t.nanos || 0) / 1_000_000;
-  return new Date(millis);
-}
-
-function fromJsonTimestamp(o: any): Date {
-  if (o instanceof Date) {
-    return o;
-  } else if (typeof o === "string") {
-    return new Date(o);
-  } else {
-    return fromTimestamp(Timestamp.fromJSON(o));
-  }
-}
-
-function longToNumber(long: Long): number {
-  if (long.gt(Number.MAX_SAFE_INTEGER)) {
-    throw new tsProtoGlobalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER");
-  }
-  return long.toNumber();
-}
-
-if (_m0.util.Long !== Long) {
-  _m0.util.Long = Long as any;
-  _m0.configure();
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}