Skip to content

Commit 6a21dc6

Browse files
committed
Use eventually from Cats ScalaTest
1 parent ca19be9 commit 6a21dc6

File tree

3 files changed

+27
-40
lines changed

3 files changed

+27
-40
lines changed

src/test/scala/base/AsyncIntSpec.scala

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,12 @@ package base
22

33
import cats.effect.testing.scalatest.AsyncIOSpec
44
import org.scalatest.OptionValues
5+
import org.scalatest.concurrent.Eventually
56
import org.scalatest.matchers.should.Matchers
67
import org.scalatest.wordspec.AsyncWordSpec
78

8-
trait AsyncIntSpec extends AsyncWordSpec with AsyncIOSpec with Matchers with OptionValues
9+
import scala.concurrent.duration.*
10+
11+
trait AsyncIntSpec extends AsyncWordSpec with AsyncIOSpec with Matchers with OptionValues with Eventually {
12+
override implicit val patienceConfig: PatienceConfig = PatienceConfig(200.millis, 10.seconds)
13+
}

src/test/scala/integration/TopicLoaderIntSpec.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,10 +203,10 @@ class TopicLoaderIntSpec extends KafkaSpecBase[IO] {
203203
.compile
204204
.drain
205205
.start
206-
_ <- retry(topicState.get.asserting(_ should contain theSameElementsAs preLoad))
206+
_ <- eventually(topicState.get.asserting(_ should contain theSameElementsAs preLoad))
207207
_ <- loadState.get.asserting(_ shouldBe true)
208208
_ <- publishStringMessages(testTopic1, postLoad)
209-
assertion <- retry(topicState.get.asserting(_ should contain theSameElementsAs (preLoad ++ postLoad)))
209+
assertion <- eventually(topicState.get.asserting(_ should contain theSameElementsAs (preLoad ++ postLoad)))
210210
_ <- fiber.cancel
211211
} yield assertion
212212
}

src/test/scala/utils/KafkaHelpers.scala

Lines changed: 19 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -2,22 +2,24 @@ package utils
22

33
import java.util.UUID
44

5+
import base.AsyncIntSpec
56
import cats.data.{NonEmptyList, NonEmptySet}
67
import cats.effect.{Async, Resource}
78
import cats.syntax.all.*
89
import fs2.Stream
910
import fs2.kafka.{AutoOffsetReset, ConsumerRecord, ConsumerSettings, KafkaConsumer}
1011
import io.github.embeddedkafka.EmbeddedKafkaConfig
1112
import org.apache.kafka.common.TopicPartition
12-
import org.scalatest.exceptions.TestFailedException
13+
import org.scalatest.Assertion
14+
import org.scalatest.enablers.Retrying
1315
import org.typelevel.log4cats.LoggerFactory
1416
import org.typelevel.log4cats.slf4j.Slf4jFactory
1517
import uk.sky.fs2.kafka.topicloader.{LoadTopicStrategy, TopicLoader}
1618

1719
import scala.concurrent.duration.*
1820

1921
trait KafkaHelpers[F[_]] {
20-
self: EmbeddedKafka[F] =>
22+
self: AsyncIntSpec & EmbeddedKafka[F] =>
2123

2224
val groupId = "test-consumer-group"
2325
val testTopic1 = "load-state-topic-1"
@@ -93,50 +95,37 @@ trait KafkaHelpers[F[_]] {
9395
def publishToKafkaAndWaitForCompaction(
9496
partitions: NonEmptySet[TopicPartition],
9597
messages: Seq[(String, String)]
96-
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F]): F[Unit] = for {
98+
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F], retrying: Retrying[F[Assertion]]): F[Unit] = for {
9799
_ <- publishToKafkaAndTriggerCompaction(partitions, messages)
98100
_ <- waitForCompaction(partitions)
99101
} yield ()
100102

101103
def waitForCompaction(
102104
partitions: NonEmptySet[TopicPartition]
103-
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F]): F[Unit] =
105+
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F], retrying: Retrying[F[Assertion]]): F[Assertion] =
104106
consumeEventually(partitions) { r =>
105107
for {
106108
records <- r
107109
messageKeys = records.map { case (k, _) => k }
108-
result <-
109-
if (messageKeys.sorted == messageKeys.toSet.toList.sorted) F.unit
110-
else F.raiseError(new TestFailedException("Topic has not compacted within timeout", 1))
111-
} yield result
110+
} yield messageKeys should contain theSameElementsAs messageKeys
112111
}
113112

114113
def consumeEventually(
115114
partitions: NonEmptySet[TopicPartition],
116115
groupId: String = UUID.randomUUID().toString
117116
)(
118-
f: F[List[(String, String)]] => F[Unit]
119-
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F]): F[Unit] =
120-
retry(
121-
fa = {
122-
val records = withAssignedConsumer[F[List[ConsumerRecord[String, String]]]](
123-
autoCommit = false,
124-
offsetReset = AutoOffsetReset.Earliest,
125-
partitions,
126-
groupId.some
127-
)(
128-
_.records
129-
.map(_.record)
130-
.interruptAfter(5.second)
131-
.compile
132-
.toList
133-
)
134-
135-
f(records.map(_.map(r => r.key -> r.value)))
136-
},
137-
delay = 1.second,
138-
max = 5
139-
)
117+
f: F[List[(String, String)]] => F[Assertion]
118+
)(implicit kafkaConfig: EmbeddedKafkaConfig, F: Async[F], retrying: Retrying[F[Assertion]]): F[Assertion] =
119+
eventually {
120+
val records = withAssignedConsumer[F[List[ConsumerRecord[String, String]]]](
121+
autoCommit = false,
122+
offsetReset = AutoOffsetReset.Earliest,
123+
partitions,
124+
groupId.some
125+
)(_.records.map(_.record).interruptAfter(5.second).compile.toList)
126+
127+
f(records.map(_.map(r => r.key -> r.value)))
128+
}
140129

141130
def withAssignedConsumer[T](
142131
autoCommit: Boolean,
@@ -172,11 +161,4 @@ trait KafkaHelpers[F[_]] {
172161
val settings = groupId.fold(baseSettings)(baseSettings.withGroupId)
173162
KafkaConsumer[F].resource(settings)
174163
}
175-
176-
def retry[A](fa: F[A], delay: FiniteDuration = 1.second, max: Int = 10)(implicit F: Async[F]): F[A] =
177-
if (max <= 1) fa
178-
else
179-
fa handleErrorWith { _ =>
180-
F.sleep(delay) *> retry(fa, delay, max - 1)
181-
}
182164
}

0 commit comments

Comments (0)