import * as kafka from '..';
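
// Client: the legacy ZooKeeper-based client. The constructor takes a ZooKeeper
// connection string and a client id, plus optional ZooKeeper, no-ack batch, and
// SSL options (all five are exercised below).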
|
const basicClient = new kafka.Client('localhost:2181/', 'sendMessage');

const optionsClient = new kafka.Client(
    'localhost:2181/',
    'sendMessage',
    { sessionTimeout: 30000, spinDelay: 1000, retries: 0 },
    { noAckBatchSize: 1000, noAckBatchAge: 1000 * 10 },
    { rejectUnauthorized: false }
);

optionsClient.topicExists(['topic'], (error: any) => { });
optionsClient.refreshMetadata(['topic'], (error: any) => { });
optionsClient.close();
optionsClient.sendOffsetCommitV2Request('group', 0, 'memberId', [], () => { });
optionsClient.close(() => { });
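
// KafkaClient: connects directly to Kafka brokers, with no ZooKeeper dependency.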
|
const basicKafkaClient = new kafka.KafkaClient();

// kafkaHost should name one or more Kafka brokers (default 'localhost:9092');
// port 2181 is ZooKeeper, which KafkaClient does not use.
const optionsKafkaClient = new kafka.KafkaClient({
    kafkaHost: 'localhost:9092',
    connectTimeout: 1000,
    requestTimeout: 1000,
    autoConnect: true,
    sslOptions: {},
    clientId: 'client id',
    connectRetryOptions: {
        retries: 5, factor: 0, minTimeout: 1000, maxTimeout: 1000, randomize: true
    }
});

optionsKafkaClient.connect();
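
// Producer: sends payloads to explicit topics/partitions. partitionerType 0
// selects the default partitioner.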
|
const optionsProducer = new kafka.Producer(basicClient, { requireAcks: 0, ackTimeoutMs: 0, partitionerType: 0 });

const producer = new kafka.Producer(basicClient);
producer.on('error', (error: Error) => { });
producer.on('ready', () => {
    const messages = [
        { topic: 'topicName', messages: ['message body'], partition: 0, attributes: 2 },
        { topic: 'topicName', messages: ['message body'], partition: 0 },
        { topic: 'topicName', messages: ['message body'], attributes: 0 },
        { topic: 'topicName', messages: ['message body'] },
        { topic: 'topicName', messages: [new kafka.KeyedMessage('key', 'message')] }
    ];

    producer.send(messages, (err: Error) => { });
    producer.send(messages, (err: Error, data: any) => { });

    producer.createTopics(['t'], true, (err: Error, data: any) => { });
    producer.createTopics(['t'], (err: Error, data: any) => { });
    producer.createTopics(['t'], false, () => { });
    producer.close();
});
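
// HighLevelProducer: like Producer, but chooses the partition for each message
// itself, so payloads normally omit `partition`.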
|
const highLevelProducer = new kafka.HighLevelProducer(basicClient);

highLevelProducer.on('error', (error: Error) => { });
highLevelProducer.on('ready', () => {
    const messages = [
        { topic: 'topicName', messages: ['message body'], attributes: 2 },
        { topic: 'topicName', messages: ['message body'], partition: 0 },
        { topic: 'topicName', messages: ['message body'], attributes: 0 },
        { topic: 'topicName', messages: ['message body'] },
        { topic: 'topicName', messages: [new kafka.KeyedMessage('key', 'message')] }
    ];

    highLevelProducer.send(messages, (err: Error) => { });
    highLevelProducer.send(messages, (err: Error, data: any) => { });

    highLevelProducer.createTopics(['t'], true, (err: Error, data: any) => { });
    highLevelProducer.createTopics(['t'], (err: Error, data: any) => { });
    highLevelProducer.createTopics(['t'], false, () => { });
    highLevelProducer.close();
});
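
// Consumer: fetches from the explicitly listed topic/partition payloads.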
|
const fetchRequests = [{ topic: 'awesome' }];
const consumer = new kafka.Consumer(basicClient, fetchRequests, { groupId: 'abcde', autoCommit: true });

consumer.on('error', (error: Error) => { });
consumer.on('offsetOutOfRange', (error: Error) => { });
consumer.on('message', (message: kafka.Message) => {
    const topic = message.topic;
    const value = message.value;
    const offset = message.offset;
    const partition = message.partition;
    const highWaterOffset = message.highWaterOffset;
    const key = message.key;
});
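
// addTopics: the trailing boolean (fromOffset) asks the consumer to start from
// the offsets given in the payloads rather than the last committed offsets.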
|
consumer.addTopics(['t1', 't2'], (err: any, added: any) => { });
consumer.addTopics([{ topic: 't1', offset: 10 }], (err: any, added: any) => { }, true);

consumer.removeTopics(['t1', 't2'], (err: any, removed: number) => { });
consumer.removeTopics('t2', (err: any, removed: number) => { });

consumer.commit((err: any, data: any) => { });
consumer.commit(true, (err: any, data: any) => { });

consumer.setOffset('topic', 0, 0);

consumer.pause();
consumer.resume();
consumer.pauseTopics(['topic1', { topic: 'topic2', partition: 0 }]);
consumer.resumeTopics(['topic1', { topic: 'topic2', partition: 0 }]);

consumer.close(true, () => { });
consumer.close((err: any) => { });
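
// HighLevelConsumer: coordinates partition ownership through ZooKeeper
// (newer kafka-node releases deprecate this in favour of ConsumerGroup).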
|
const fetchRequests1 = [{ topic: 'awesome' }];
const hlConsumer = new kafka.HighLevelConsumer(basicClient, fetchRequests1, { groupId: 'abcde', autoCommit: true });

hlConsumer.on('error', (error: Error) => { });
hlConsumer.on('offsetOutOfRange', (error: Error) => { });
hlConsumer.on('message', (message: kafka.Message) => {
    const topic = message.topic;
    const value = message.value;
    const offset = message.offset;
    const partition = message.partition;
    const highWaterOffset = message.highWaterOffset;
    const key = message.key;
});

hlConsumer.addTopics(['t1', 't2'], (err: any, added: any) => { });
hlConsumer.addTopics([{ topic: 't1', offset: 10 }], (err: any, added: any) => { });

hlConsumer.removeTopics(['t1', 't2'], (err: any, removed: number) => { });
hlConsumer.removeTopics('t2', (err: any, removed: number) => { });

hlConsumer.commit((err: any, data: any) => { });
hlConsumer.commit(true, (err: any, data: any) => { });

hlConsumer.setOffset('topic', 0, 0);

hlConsumer.pause();
hlConsumer.resume();

hlConsumer.close(true, () => { });
hlConsumer.close(() => { });
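
// ConsumerGroup: `host` points at ZooKeeper here; newer setups can instead pass
// `kafkaHost` to talk to the brokers directly.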
|
const ackBatchOptions = { noAckBatchSize: 1024, noAckBatchAge: 10 };
const cgOptions: kafka.ConsumerGroupOptions = {
    host: 'localhost:2181',
    batch: ackBatchOptions,
    groupId: 'groupID',
    id: 'consumerID',
    encoding: 'buffer',
    keyEncoding: 'buffer',
    sessionTimeout: 15000,
    protocol: ['roundrobin'],
    fromOffset: 'latest',
    migrateHLC: false,
    migrateRolling: true
};

const consumerGroup = new kafka.ConsumerGroup(cgOptions, ['topic1']);
consumerGroup.on('error', (err) => { });
consumerGroup.on('message', (msg) => { });
consumerGroup.close(true, (err: Error) => { });
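
// Offset: low-level offset queries and commits against a Client.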
|
const offset = new kafka.Offset(basicClient);

offset.on('ready', () => { });
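
// In fetch payloads, time -1 requests the latest offsets and -2 the earliest
// (standard Kafka ListOffsets semantics); any other value is a timestamp in ms.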
|
offset.fetch([{ topic: 't', partition: 0, time: Date.now(), maxNum: 1 }, { topic: 't' }], (err: any, data: any) => { });

offset.commit('groupId', [{ topic: 't', partition: 0, offset: 10 }], (err, data) => { });

offset.fetchCommits('groupId', [{ topic: 't', partition: 0 }], (err, data) => { });

offset.fetchLatestOffsets(['t'], (err, offsets) => { });
offset.fetchEarliestOffsets(['t'], (err, offsets) => { });
|