diff --git a/docs/index.html b/docs/index.html
index e24a7783..8ef81cab 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -190,7 +190,7 @@
And we'll define some customer records to be written:
import org.apache.kafka.clients.producer.ProducerRecord
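For context (the record definitions themselves fall outside this hunk), the records being written look roughly like the following — a sketch assuming the Customer and CustomerId case classes and the topic value defined earlier on the page, with illustrative field values:
// Sketch: Customer, CustomerId, and `topic` are assumed from earlier in the docs.
val recordsToBeWritten = (1 to 10).map(a =>
  new ProducerRecord(topic.name, CustomerId(a.toString), Customer(s"name-$a", s"address-$a"))
).toVector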
@@ -221,16 +221,16 @@ Writing typed records with an Avro4s producer
Turning a generic producer into a typed producer is simple. We first ensure that com.sksamuel.avro4s.RecordFormat
instances for our data are in scope:
implicit val CustomerRecordFormat = com.sksamuel.avro4s.RecordFormat[Customer]
-// CustomerRecordFormat: com.sksamuel.avro4s.RecordFormat[Customer] = com.sksamuel.avro4s.RecordFormat$$anon$1@244ddd7f
+// CustomerRecordFormat: com.sksamuel.avro4s.RecordFormat[Customer] = com.sksamuel.avro4s.RecordFormat$$anon$1@11a1537e
implicit val CustomerIdRecordFormat = com.sksamuel.avro4s.RecordFormat[CustomerId]
-// CustomerIdRecordFormat: com.sksamuel.avro4s.RecordFormat[CustomerId] = com.sksamuel.avro4s.RecordFormat$$anon$1@75b01fbe
+// CustomerIdRecordFormat: com.sksamuel.avro4s.RecordFormat[CustomerId] = com.sksamuel.avro4s.RecordFormat$$anon$1@21d0424b
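A RecordFormat converts between a case class and an Avro GenericRecord in both directions. As a quick illustration (a sketch, not part of the rendered page), assuming Customer has the name and address fields defined earlier:
// Round-trip a Customer through Avro's generic representation.
val genericRecord = CustomerRecordFormat.to(Customer("Jane Smith", "123 Main St."))
val roundTripped = CustomerRecordFormat.from(genericRecord)
// roundTripped == Customer("Jane Smith", "123 Main St.")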
And with those implicits in scope, we can create our producer:
val avro4sProducer = producer.map(_.toAvro4s[CustomerId, Customer])
// avro4sProducer: Resource[IO, ProducerApi[[A]IO[A], CustomerId, Customer]] = Bind(
// source = Allocate(
-// resource = cats.effect.kernel.Resource$$$Lambda$10725/0x0000000102d73040@1e98de4c
+// resource = cats.effect.kernel.Resource$$$Lambda$10686/0x0000000102d6b040@17c879dc
// ),
-// fs = cats.effect.kernel.Resource$$Lambda$10853/0x0000000102eb1840@6ae244fb
+// fs = cats.effect.kernel.Resource$$Lambda$10814/0x0000000102ea9040@3fb4ceb2
// )
We can now write our typed customer records successfully!
import cats.effect.unsafe.implicits.global
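The write itself looks roughly like this — a sketch assuming kafka4s's sendSync method and the recordsToBeWritten value sketched above, not the exact code from the rendered page:
import cats.syntax.all._
// Send each record in order, then run the whole effect.
avro4sProducer.use(p =>
  recordsToBeWritten.traverse_(p.sendSync)
).unsafeRunSync()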
@@ -259,11 +259,11 @@ // consumer: Resource[IO, ConsumerApi[IO, CustomerId, Customer]] = Bind(
// source = Bind(
// source = Allocate(
-// resource = cats.effect.kernel.Resource$$$Lambda$10725/0x0000000102d73040@629cab09
+// resource = cats.effect.kernel.Resource$$$Lambda$10686/0x0000000102d6b040@5934bb9c
// ),
-// fs = com.banno.kafka.consumer.ConsumerApi$Avro$$$Lambda$10856/0x0000000102eb3840@18fc9c0a
+// fs = com.banno.kafka.consumer.ConsumerApi$Avro$$$Lambda$10817/0x0000000102eab040@359df97c
// ),
-// fs = cats.effect.kernel.Resource$$Lambda$10853/0x0000000102eb1840@2922db26
+// fs = cats.effect.kernel.Resource$$Lambda$10814/0x0000000102ea9040@28518b50
// )
With our Kafka consumer in hand, we'll assign our topic partition to the consumer with no offsets, so that it starts reading from the first record, and then read a stream of records from our Kafka topic:
import org.apache.kafka.common.TopicPartition
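The assign-and-read step then looks roughly like this — a sketch assuming kafka4s's assign and recordStream signatures, along with the topic value from earlier:
import scala.concurrent.duration._
// Assign the topic with no offsets (start from the first record),
// then pull the first two records from the stream.
val messages = consumer.use(c =>
  c.assign(topic.name, Map.empty[TopicPartition, Long]) *>
    c.recordStream(1.second).take(2).compile.toList
).unsafeRunSync()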