Manually creating Kafka topics in Spring Boot with a specified number of partitions and replicas
Dependencies:

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
application.properties:

### kafka configuration
spring.kafka.bootstrap-servers=10.160.3.70:9092
spring.kafka.consumer.group-id=sea-test
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.max-poll-records=2000
#spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
#spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.retries=3
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.linger=10
#spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
#spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
KafkaConfig:

package config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ContainerProperties;

import com.google.common.collect.Maps;

@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    @Value("${spring.kafka.producer.linger}")
    private int linger;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    // Reference: blog.csdn/tmeng521/article/details/90901925
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // number of send retries
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        // a batch is sent once it reaches batchSize bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        // linger time: when it expires, the batch is sent even if it has not reached batchSize
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        // total buffer memory for the producer
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // producer acknowledgement: -1 (same as all) means the message must be written to the leader and to all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "-1"); // with a single broker, "1" is recommended
        // maximum size of a single message in bytes (default 1048576)
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
        // broker response timeout: if the broker has not acknowledged within 60 seconds, the send is considered failed
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        // producer interceptor (value is the interceptor class)
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "handler.KafkaProducerInterceptor");
        // compression algorithm (no compression by default)
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        return props;
    }

    // Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without this bean, AdminClient cannot be used to create topics programmatically
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // connection address of the Kafka brokers (not ZooKeeper)
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return new KafkaAdmin(props);
    }

    // Kafka admin client; once this bean exists it can be injected to create topics, e.g. with multiple replicas in a cluster
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = Maps.newHashMap();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
//        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 180000);
//        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 900000);
//        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 900000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // batch consumption; the number of records per batch is controlled by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        // set the retry template
//        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }
}
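For reference, a minimal sketch of a batch listener wired to the batchFactory above; the class and method names are made up, and it listens on the topic.quick.initial topic created in the next section. Because the container is configured with AckMode.MANUAL, the listener has to acknowledge each batch itself:

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class DemoBatchListener {

    // containerFactory must match the batchFactory bean name; records arrive in batches of up to max-poll-records
    @KafkaListener(topics = "topic.quick.initial", containerFactory = "batchFactory")
    public void onMessages(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.partition() + " -> " + record.value());
        }
        // AckMode.MANUAL: commit the offsets of this batch explicitly
        ack.acknowledge();
    }
}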
KafkaInitialConfiguration:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;

@Configuration
public class KafkaInitialConfiguration {

    // create a topic named topic.quick.initial with 8 partitions and 1 replica
    @Bean // created as a bean (bean name: initialTopic)
    public NewTopic initialTopic() {
        return new NewTopic("topic.quick.initial", 8, (short) 1);
    }

    /**
     * With this @Bean approach, a NewTopic bean for an existing topic name overrides the previous definition.
     * @return
     */
    // after this change the topic has 11 partitions; note that the partition count can only be increased, never decreased
    @Bean
    public NewTopic initialTopic2() {
        return new NewTopic("topic.quick.initial", 11, (short) 1);
    }

    // Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without this bean, AdminClient cannot be used to create topics programmatically
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // connection address of the Kafka brokers (not ZooKeeper)
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        return new KafkaAdmin(props);
    }

    // Kafka admin client; once this bean exists it can be injected to create topics
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }
}
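To confirm that the second bean really raised topic.quick.initial to 11 partitions, the injected AdminClient can describe the topic. A minimal sketch (the method name is made up; it could sit in the test class shown in the next section, and it relies on java.util.Collections and org.apache.kafka.clients.admin.TopicDescription):

    @Test // hypothetical check: print the current partition count of topic.quick.initial
    public void describeInitialTopic() throws Exception {
        // DescribeTopicsResult.all() returns a future of Map<topicName, TopicDescription>
        TopicDescription description = adminClient
                .describeTopics(Collections.singletonList("topic.quick.initial"))
                .all().get()
                .get("topic.quick.initial");
        System.out.println(description.name() + " has " + description.partitions().size() + " partitions");
    }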
Test: manually create a topic, and list all topics.

    @Autowired // the AdminClient bean has to be defined yourself (see the configuration above)
    private AdminClient adminClient;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Test // manually create a topic with a custom partition count
    public void testCreateTopic() throws InterruptedException {
        // manual creation: 10 partitions, 1 replica
        // more partitions allow higher consumer parallelism, but the count should match the hardware
        NewTopic topic = new NewTopic("ate", 10, (short) 1);
        adminClient.createTopics(Arrays.asList(topic));
        Thread.sleep(1000);
    }

    /**
     * List all topics.
     * @throws Exception
     */
    @Test
    public void getAllTopic() throws Exception {
        ListTopicsResult listTopics = adminClient.listTopics();
        Set<String> topics = listTopics.names().get();

        for (String topic : topics) {
            System.out.println(topic);
        }
    }
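Since the partition count can only grow, the NewTopic-bean override shown earlier is one way to do it; the same AdminClient can also increase partitions at runtime. A minimal sketch (the topic name and target count are only examples), using org.apache.kafka.clients.admin.NewPartitions. Note that waiting on all().get() blocks until the broker has applied the change, which also works for createTopics instead of Thread.sleep:

    @Test // hypothetical: grow an existing topic to 16 partitions (shrinking is not allowed by Kafka)
    public void testIncreasePartitions() throws Exception {
        Map<String, NewPartitions> request =
                Collections.singletonMap("topic.quick.initial", NewPartitions.increaseTo(16));
        // wait for the broker to apply the change instead of sleeping
        adminClient.createPartitions(request).all().get();
    }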