Skip to content

Commit 3b2daa4

Browse files
committed
Add GermanAnalyzerBuilder
1 parent dcbbdf7 commit 3b2daa4

File tree

2 files changed

+75
-0
lines changed

2 files changed

+75
-0
lines changed

lucene/src/main/scala/textmogrify/lucene/AnalyzerBuilder.scala

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.en.PorterStemFilter
2323
import org.apache.lucene.analysis.es.SpanishLightStemFilter
2424
import org.apache.lucene.analysis.fr.FrenchLightStemFilter
2525
import org.apache.lucene.analysis.it.ItalianLightStemFilter
26+
import org.apache.lucene.analysis.de.GermanLightStemFilter
2627
import org.apache.lucene.analysis.LowerCaseFilter
2728
import org.apache.lucene.analysis.Analyzer
2829
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter
@@ -104,6 +105,8 @@ object AnalyzerBuilder {
104105
new EnglishAnalyzerBuilder(Config.empty, false)
105106
def french: FrenchAnalyzerBuilder =
106107
new FrenchAnalyzerBuilder(Config.empty, false)
108+
def german: GermanAnalyzerBuilder =
109+
new GermanAnalyzerBuilder(Config.empty, false)
107110
def italian: ItalianAnalyzerBuilder =
108111
new ItalianAnalyzerBuilder(Config.empty, false)
109112
def spanish: SpanishAnalyzerBuilder =
@@ -233,3 +236,29 @@ final class ItalianAnalyzerBuilder private[lucene] (
233236
def build[F[_]](implicit F: Sync[F]): Resource[F, Analyzer] =
234237
mkFromStandardTokenizer(config)(ts => if (self.stemmer) new ItalianLightStemFilter(ts) else ts)
235238
}
239+
240+
/** Builder for a German-language Lucene analyzer pipeline.
  *
  * Inherits the shared [[AnalyzerBuilder]] options (lowercasing, ASCII folding,
  * stop words) and optionally appends the Lucene `GermanLightStemFilter`.
  */
final class GermanAnalyzerBuilder private[lucene] (
    config: Config,
    stemmer: Boolean,
) extends AnalyzerBuilder(config) { self =>
  type Builder = GermanAnalyzerBuilder

  // Internal copy helper; preserves the current stemmer flag unless overridden.
  private def copy(
      newConfig: Config,
      stemmer: Boolean = self.stemmer,
  ): GermanAnalyzerBuilder =
    new GermanAnalyzerBuilder(newConfig, stemmer)

  def withConfig(newConfig: Config): GermanAnalyzerBuilder =
    copy(newConfig = newConfig)

  /** Adds the GermanLight Stemmer to the end of the analyzer pipeline and enables lowercasing.
    * Stemming reduces words like `gerne` and `Theken` to their root forms `gern` and `thek`.
    * NOTE: Lowercasing is forced as it is required for the Lucene GermanLightStemFilter.
    */
  def withGermanLightStemmer: GermanAnalyzerBuilder =
    copy(config.copy(lowerCase = true), stemmer = true)

  /** Builds the analyzer as a managed resource; the German light stemmer is
    * appended to the token stream only when it has been enabled.
    */
  def build[F[_]](implicit F: Sync[F]): Resource[F, Analyzer] =
    mkFromStandardTokenizer(config)(ts => if (self.stemmer) new GermanLightStemFilter(ts) else ts)
}

lucene/src/test/scala/textmogrify/lucene/AnalyzerBuilderSuite.scala

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,3 +235,49 @@ class ItalianAnalyzerBuilderSuite extends CatsEffectSuite {
235235
}
236236

237237
}
238+
239+
/** Exercises the German analyzer builder: default tokenization, each
  * individual option, and chaining several options together.
  */
class GermanAnalyzerBuilderSuite extends CatsEffectSuite {

  val jalapenos = "Ich mag Jalapeños"
  val jumping = "Neeko springt gerne auf Theken"

  test("german analyzer default should tokenize without any transformations") {
    val tokens = AnalyzerBuilder.german.tokenizer[IO].use(_.apply(jalapenos))
    assertIO(tokens, Vector("Ich", "mag", "Jalapeños"))
  }

  test("german analyzer withLowerCasing should lowercase all letters") {
    val tokens = AnalyzerBuilder.german.withLowerCasing.tokenizer[IO].use(_.apply(jalapenos))
    assertIO(tokens, Vector("ich", "mag", "jalapeños"))
  }

  test("german analyzer withASCIIFolding should fold 'ñ' to 'n'") {
    val tokens = AnalyzerBuilder.german.withASCIIFolding.tokenizer[IO].use(_.apply(jalapenos))
    assertIO(tokens, Vector("Ich", "mag", "Jalapenos"))
  }

  test("german analyzer withStopWords should filter them out") {
    val tokens =
      AnalyzerBuilder.german.withStopWords(Set("Ich")).tokenizer[IO].use(_.apply(jalapenos))
    assertIO(tokens, Vector("mag", "Jalapeños"))
  }

  test("german analyzer withGermanLightStemmer should lowercase and stem words") {
    val tokens =
      AnalyzerBuilder.german.withGermanLightStemmer.tokenizer[IO].use(_.apply(jumping))
    assertIO(tokens, Vector("neeko", "springt", "gern", "auf", "thek"))
  }

  test("german analyzer builder settings can be chained") {
    // Order of the chained settings should not matter for the final pipeline.
    val builder = AnalyzerBuilder.german.withGermanLightStemmer
      .withStopWords(Set("auf"))
      .withASCIIFolding
      .withLowerCasing
    val tokens = builder.tokenizer[IO].use(_.apply(jumping))
    assertIO(tokens, Vector("neeko", "springt", "gern", "thek"))
  }

}

0 commit comments

Comments (0)