java, apache-spark, apache-kafka, avro

Java Kafka consumer and Avro deserializer


I'm developing a simple Java application with Spark Streaming.

I configured a Kafka JDBC connector (Postgres to topic) and I want to read the topic with a Spark Streaming consumer.

I'm able to read the topic correctly with:

./kafka-avro-console-consumer --bootstrap-server localhost:9092 --property schema.registry.url=http://localhost:8081 --property print.key=true --from-beginning --topic postgres-ip_audit

and get this result:

null {"id":1557,"ip":{"string":"90.228.176.138"},"create_ts":{"long":1554819937582}}

When I use my Java application with this config:

Map<String, Object> kafkaParams = new HashMap<>();
kafkaParams.put("bootstrap.servers", "localhost:9092");
kafkaParams.put("key.deserializer", StringDeserializer.class);
kafkaParams.put("value.deserializer", StringDeserializer.class);
kafkaParams.put("group.id", "groupStreamId");
kafkaParams.put("auto.offset.reset", "latest");
kafkaParams.put("enable.auto.commit", false);

I get results like this:

�179.20.119.53�����Z

Can someone show me how to fix this?

I also tried using a ByteArrayDeserializer and converting the byte[] into a String, but I always get garbled characters.


Solution

  • You can deserialize Avro messages using io.confluent.kafka.serializers.KafkaAvroDeserializer, with the Schema Registry managing the record schemas. The garbled output you see is expected: the value bytes are not UTF-8 text but Confluent's Avro wire format (one magic byte, a 4-byte schema ID, then the Avro-encoded payload), so neither StringDeserializer nor a byte[]-to-String conversion will ever produce readable text.

    Here is a sample code snippet (it uses the older spark-streaming-kafka 0.8 direct-stream API, where KafkaAvroDecoder does the Avro decoding):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    
    import io.confluent.kafka.serializers.KafkaAvroDecoder;
    import kafka.serializer.StringDecoder;
    import org.apache.spark.SparkConf;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.streaming.Durations;
    import org.apache.spark.streaming.api.java.JavaPairInputDStream;
    import org.apache.spark.streaming.api.java.JavaStreamingContext;
    import org.apache.spark.streaming.kafka.KafkaUtils;
    import scala.Tuple2;
    
    public class SparkStreaming {
    
      public static void main(String... args) {
        SparkConf conf = new SparkConf();
        conf.setMaster("local[2]");
        conf.setAppName("Spark Streaming Test Java");
    
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaStreamingContext ssc = new JavaStreamingContext(sc, Durations.seconds(10));
    
        processStream(ssc, sc);
    
        ssc.start();
        ssc.awaitTermination();
      }
    
      private static void processStream(JavaStreamingContext ssc, JavaSparkContext sc) {
        System.out.println("--> Processing stream");
    
        Map<String, String> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("schema.registry.url", "http://localhost:8081");
        props.put("group.id", "spark");
        props.put("specific.avro.reader", "true");
    
        props.put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    
        Set<String> topicsSet = new HashSet<>(Collections.singletonList("test"));
    
        // Direct stream over the topic set; KafkaAvroDecoder looks up the writer
        // schema in the Schema Registry and decodes each record value
        JavaPairInputDStream<String, Object> stream = KafkaUtils.createDirectStream(ssc, String.class, Object.class,
          StringDecoder.class, KafkaAvroDecoder.class, props, topicsSet);
    
        stream.foreachRDD(rdd -> {
          rdd.foreachPartition(iterator -> {
              while (iterator.hasNext()) {
                Tuple2<String, Object> next = iterator.next();
                // Model is the Avro-generated specific record class for the
                // topic's schema (see the sample repo)
                Model model = (Model) next._2();
                System.out.println(next._1() + " --> " + model);
              }
            }
          );
        });
      }
    }
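    Note that with specific.avro.reader=true the decoder returns instances of the Avro-generated class (Model here); if you leave that property off, values come back as GenericRecord and you read fields with record.get("ip") instead of typed getters.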
    

    A complete sample application is available in this GitHub repo.
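
  • If you mainly want to fix the consumer configuration from the question, the same idea works without Spark: keep StringDeserializer for the key, switch the value deserializer to KafkaAvroDeserializer, and add schema.registry.url. Here is a minimal sketch (not from the sample repo; it assumes the Confluent kafka-avro-serializer artifact plus kafka-clients 2.x on the classpath, and reuses the topic and group id from the question):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import io.confluent.kafka.serializers.KafkaAvroDeserializer;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class AvroTopicConsumer {

      public static void main(String... args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "groupStreamId");
        props.put("auto.offset.reset", "earliest");
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", StringDeserializer.class.getName());
        // The actual fix: let the Confluent deserializer unpack the wire format
        props.put("value.deserializer", KafkaAvroDeserializer.class.getName());
        props.put("schema.registry.url", "http://localhost:8081");

        try (KafkaConsumer<String, GenericRecord> consumer = new KafkaConsumer<>(props)) {
          consumer.subscribe(Collections.singletonList("postgres-ip_audit"));
          while (true) {
            ConsumerRecords<String, GenericRecord> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, GenericRecord> record : records) {
              // Values arrive as GenericRecord; toString() renders them as JSON,
              // much like the kafka-avro-console-consumer output above
              System.out.println(record.key() + " --> " + record.value());
            }
          }
        }
      }
    }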