// grpc.proto (forked from mailgun/kafka-pixy)

syntax = "proto3";

option go_package = "pb";

service KafkaPixy {
// Produce writes a message to a Kafka topic.
//
// If ProdRq.async_mode is false (the default) then the request will
// block until the message is written to all ISR. In this case the response
// will contain the partition and offset of the message. This mode has to be
// used to achieve an at-least-once delivery guarantee.
// If ProdRq.async_mode is true, then Kafka-Pixy returns immediately after
// it gets the request and performs the write in the background. This mode
// ensures the highest throughput, but messages can be lost, e.g. if the host
// crashes before Kafka-Pixy has a chance to complete the write.
//
// The hash of ProdRq.key_value is used to determine the partition that the
// message should be written to. If you want a message to go to a random
// partition then set ProdRq.key_undefined to true. Note that if both
// ProdRq.key_undefined and ProdRq.key_value are left at their defaults,
// which are false and an empty key respectively, then messages will be
// consistently written to a particular partition selected by the hash of
// an empty key.
//
// gRPC error codes:
// * Invalid Argument (3): see the status description for details;
// * Internal (13): see the status description and logs for details;
rpc Produce (ProdRq) returns (ProdRs) {}
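
// A minimal Go sketch of a synchronous Produce call, assuming stubs
// generated from this file into a "pb" package with protoc-gen-go and
// protoc-gen-go-grpc, and assuming Kafka-Pixy's gRPC endpoint listens on
// localhost:19091. The cluster and topic names are hypothetical.
//
//   import (
//       "context"
//       "log"
//
//       "google.golang.org/grpc"
//       "google.golang.org/grpc/credentials/insecure"
//       // plus the generated "pb" stubs for this file
//   )
//
//   conn, err := grpc.Dial("localhost:19091",
//       grpc.WithTransportCredentials(insecure.NewCredentials()))
//   if err != nil {
//       log.Fatal(err)
//   }
//   defer conn.Close()
//   client := pb.NewKafkaPixyClient(conn)
//
//   // async_mode is left false, so the call blocks until the message is
//   // written to all ISR and the response carries the partition and offset.
//   rs, err := client.Produce(context.Background(), &pb.ProdRq{
//       Cluster:  "default",     // hypothetical cluster name
//       Topic:    "some.topic",  // hypothetical topic name
//       KeyValue: []byte("user-42"),
//       Message:  []byte(`{"hello":"world"}`),
//   })
//   if err != nil {
//       log.Fatalf("produce failed: %v", err)
//   }
//   log.Printf("written to partition=%d offset=%d", rs.Partition, rs.Offset)
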
// Consume reads a message from a topic and optionally acknowledges a
// message previously consumed from the same topic.
//
// Requests are performed in long polling fashion, that is, if all available
// messages have been consumed then the request will block for
// config.yaml:proxies.<cluster>.consumer.long_polling_timeout waiting for
// new messages. If no new messages are produced while waiting, the request
// will return a Not Found (5) gRPC error (see the error codes below).
//
// To consume the first message set ConsNAckRq.no_ack to true, since there
// is no message to acknowledge at this point. In the second and all
// subsequent calls of the method set ConsNAckRq.ack_partition and
// ConsNAckRq.ack_offset to the respective values of the ConsRs returned by
// the previous method call. To acknowledge the last consumed message before
// terminating the application, call the Ack method.
//
// If a message is not acknowledged within
// config.yaml:proxies.<cluster>.consumer.ack_timeout then it will be returned
// by Kafka-Pixy in a ConsRs again, possibly to another application.
//
// If an at-least-once delivery guarantee and retries are not desirable, then
// you can set ConsNAckRq.auto_ack to true and Kafka-Pixy will acknowledge
// messages automatically before returning them in ConsRs.
//
// gRPC error codes:
// * Not Found (5): it just means that all messages have been consumed and
// the long polling timeout has elapsed. Just keep calling this method
// in a loop;
// * Resource Exhausted (8): too many consume requests. Either reduce the
// number of consuming threads or increase
// config.yaml:proxies.<cluster>.consumer.channel_buffer_size;
// * Invalid Argument (3): see the status description for details;
// * Internal (13): see the status description and logs for details;
rpc ConsumeNAck (ConsNAckRq) returns (ConsRs) {}
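
// A minimal Go sketch of the consume-n-ack loop described above, reusing the
// client from the Produce example and additionally importing
// "google.golang.org/grpc/codes" and "google.golang.org/grpc/status". The
// cluster, topic, and group names, and the process helper, are hypothetical.
//
//   rq := &pb.ConsNAckRq{
//       Cluster: "default",     // hypothetical cluster name
//       Topic:   "some.topic",  // hypothetical topic name
//       Group:   "some-group",  // hypothetical consumer group
//       NoAck:   true,          // nothing to acknowledge on the first call
//   }
//   for {
//       rs, err := client.ConsumeNAck(context.Background(), rq)
//       if status.Code(err) == codes.NotFound {
//           continue // long polling timeout elapsed, just poll again
//       }
//       if err != nil {
//           log.Fatalf("consume failed: %v", err)
//       }
//       process(rs.Message) // hypothetical handler for the message body
//       // Acknowledge this message in the next ConsumeNAck call.
//       rq.NoAck = false
//       rq.AckPartition = rs.Partition
//       rq.AckOffset = rs.Offset
//   }
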
// Ack acknowledges a message consumed earlier from a topic.
//
// This method is provided solely to acknowledge the last consumed message
// before the application terminates. In all other cases ConsumeNAck should
// be used.
//
// gRPC error codes:
// * Invalid Argument (3): see the status description for details;
// * Internal (13): see the status description and logs for details;
rpc Ack (AckRq) returns (AckRs) {}
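
// A minimal Go sketch of acknowledging the last consumed message before
// shutting down, reusing the client from the examples above; "last" is
// assumed to hold the ConsRs returned by the final ConsumeNAck call, and the
// cluster, topic, and group names are hypothetical.
//
//   _, err := client.Ack(context.Background(), &pb.AckRq{
//       Cluster:   "default",     // hypothetical cluster name
//       Topic:     "some.topic",  // hypothetical topic name
//       Group:     "some-group",  // hypothetical consumer group
//       Partition: last.Partition,
//       Offset:    last.Offset,
//   })
//   if err != nil {
//       log.Printf("failed to ack last message: %v", err)
//   }
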
}

message ProdRq {
// Name of a Kafka cluster to operate on.
string cluster = 1;
// Name of a topic to produce to.
string topic = 2;
// The hash of the key is used to determine the partition to produce to. By
// default it is an empty byte array, which is a valid key, unless
// key_undefined is set to true, in which case a random partition is selected.
bytes key_value = 3;
// If true then the message is written to a random partition, otherwise
// the hash of key_value is used to determine the partition.
bool key_undefined = 4;
// Message body.
bytes message = 5;
// If true then the method returns immediately after Kafka-Pixy gets the
// produce request, and the message is written to Kafka asynchronously.
// In that case the partition and offset returned in the response should be
// ignored. If false (the default), then a response is returned after the
// message is written to all ISR, and the response provides the partition
// and offset where the message was actually written.
bool async_mode = 6;
}

message ProdRs {
// Partition the message was written to. The value only makes sense if
// ProdRq.async_mode was false.
int32 partition = 1;
// Offset the message was written to. The value only makes sense if
// ProdRq.async_mode was false.
int64 offset = 2;
}

message ConsNAckRq {
// Name of a Kafka cluster to operate on.
string cluster = 1;
// Name of a topic to consume from.
string topic = 2;
// Name of a consumer group.
string group = 3;
// If true then no message is acknowledged by the request.
bool no_ack = 4;
// If true and no_ack is false then the message returned by the request is
// automatically acknowledged by Kafka-Pixy before the request completes.
bool auto_ack = 5;
// If both no_ack and auto_ack are false (the default), then ack_partition
// and ack_offset, along with cluster+group+topic, determine the message that
// should be acknowledged by the request.
int32 ack_partition = 6;
int64 ack_offset = 7;
}

message ConsRs {
// Partition the message was read from.
int32 partition = 1;
// Offset of the read message in the partition.
int64 offset = 2;
// Key that was used to produce the message; it is undefined if
// key_undefined is true.
bytes key_value = 3;
// If true then the message was produced to a random partition.
bool key_undefined = 4;
// Message body.
bytes message = 5;
}

message AckRq {
// Name of a Kafka cluster to operate on.
string cluster = 1;
// Name of the topic the acknowledged message was consumed from.
string topic = 2;
// Name of a consumer group.
string group = 3;
// Partition that the acknowledged message was consumed from.
int32 partition = 4;
// Offset in the partition that the acknowledged message was consumed from.
int64 offset = 5;
}

message AckRs {}