Tags: spring-cloud-stream, spring-cloud-stream-binder-kafka

Spring Cloud Stream - How to avoid sending messages to non-target brokers


I have a Kafka producer application with the following configuration.

spring:
  main:
    banner-mode: off
  application:
    name: sample-app

  cloud:
    stream:
      function:
        definition: sendEvents;jsonEvents

      binders:
        kafka1:
          type: kafka
          environment:
            spring.cloud.stream.kafka.binder:
              brokers:
                - 'localhost:29092'

        kafka2:
          type: kafka
          environment:
            spring.cloud.stream.kafka.binder:
              brokers:
                - 'localhost:29093'

      bindings:
        sendEvents-out-0:
          binder: kafka1
          destination: send_events
          contentType: application/json
        jsonEvents-out-0:
          binder: kafka2
          destination: json_events
          contentType: application/json
        consumer-in-0:
          binder: kafka1
          group: ${spring.application.name}
          destination: send_events
        consumer2-in-0:
          binder: kafka2
          group: ${spring.application.name}
          destination: json_events

And here is the controller that sends the messages:

import java.util.HashMap;
import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.stream.function.StreamBridge;
import org.springframework.http.MediaType;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.MimeTypeUtils;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import lombok.extern.slf4j.Slf4j;

@RestController
@RequestMapping(path = "/{datasource}/{topicName}")
@Slf4j
public class MainController
{

  @Autowired
  StreamBridge streamBridge;

  @PostMapping(consumes = MediaType.APPLICATION_JSON_VALUE)
  public Map<String, String> postMessage(@PathVariable(name = "datasource") String source,
                                         @PathVariable(name = "topicName") String topic,
                                         @RequestBody Object body)
  {

    Map<String, String> map = new HashMap<>();
    log.info("what is source " + source);

    log.info("what is topic " + topic);
    map.put("source", source);
    map.put("topic", topic);

    Message<Map<String, String>> message = MessageBuilder.withPayload(map)
      .setHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.APPLICATION_JSON)
      .setHeader(MessageHeaders.ERROR_CHANNEL, "error_channel")
      .build();

    log.info("postMessage");
    if (source.equalsIgnoreCase("send")) {
      streamBridge.send("sendEvents-out-0", message);
    } else {
      streamBridge.send("jsonEvents-out-0", message);
    }
    return map;
  }
}

The app is a simple spring-boot-starter-webflux application. When a request is received, it sends the body to the corresponding Kafka broker. Messages sent to one broker are not supposed to appear on the other. StreamBridge is used to send messages to the different bindings.

However, when I tested the application, I found that messages that were only supposed to go to kafka2 (jsonEvents-out-0) could also be found in kafka1 under the same topic name (json_events). How can I completely prevent kafka1 from storing messages intended only for kafka2?


Solution

  • I found the solution. Autowire BindingServiceProperties into the bean and use it to look up the binder configured for a given binding name, then pass that binder name explicitly to StreamBridge.send:

    String bindingName = "sendEvents-out-0";
    
    String binder = this.bindingServiceProperties.getBindingProperties(bindingName).getBinder();
    
    streamBridge.send(bindingName, binder, message);
    

    With the binder specified explicitly, the message is no longer sent to the default Kafka cluster.
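
    For completeness, here is a minimal sketch of the whole controller with that fix applied. It assumes the binding names from the configuration above; treat it as one way to wire this up rather than the only one:

    import java.util.HashMap;
    import java.util.Map;

    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.cloud.stream.config.BindingServiceProperties;
    import org.springframework.cloud.stream.function.StreamBridge;
    import org.springframework.http.MediaType;
    import org.springframework.messaging.Message;
    import org.springframework.messaging.MessageHeaders;
    import org.springframework.messaging.support.MessageBuilder;
    import org.springframework.util.MimeTypeUtils;
    import org.springframework.web.bind.annotation.PathVariable;
    import org.springframework.web.bind.annotation.PostMapping;
    import org.springframework.web.bind.annotation.RequestBody;
    import org.springframework.web.bind.annotation.RequestMapping;
    import org.springframework.web.bind.annotation.RestController;

    @RestController
    @RequestMapping(path = "/{datasource}/{topicName}")
    public class MainController
    {

      @Autowired
      StreamBridge streamBridge;

      @Autowired
      BindingServiceProperties bindingServiceProperties;

      @PostMapping(consumes = MediaType.APPLICATION_JSON_VALUE)
      public Map<String, String> postMessage(@PathVariable(name = "datasource") String source,
                                             @PathVariable(name = "topicName") String topic,
                                             @RequestBody Object body)
      {
        Map<String, String> map = new HashMap<>();
        map.put("source", source);
        map.put("topic", topic);

        Message<Map<String, String>> message = MessageBuilder.withPayload(map)
          .setHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.APPLICATION_JSON)
          .build();

        // Pick the output binding, then resolve the binder configured for it,
        // so StreamBridge does not fall back to the default binder.
        String bindingName = source.equalsIgnoreCase("send") ? "sendEvents-out-0" : "jsonEvents-out-0";
        String binderName = bindingServiceProperties.getBindingProperties(bindingName).getBinder();

        streamBridge.send(bindingName, binderName, message);
        return map;
      }
    }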