# Copyright 2023–2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the tokenizer and padding transforms in the grain input pipeline."""

import unittest

import grain.python as grain
import numpy as np
from numpy.testing import assert_array_equal

from MaxText.input_pipeline import _grain_tokenizer
from MaxText.input_pipeline import _input_pipeline_utils


class MockTokenizer:
  """Mocks a tokenizer by splitting on spaces and mapping letters to small ints.

  For example, "a b c" -> [1, 2, 3].
  """

  def encode(self, text: str) -> list[int]:
    if not text:
      return []
    # Simple 'a' = 1, 'b' = 2, ... mapping; assumes single-letter,
    # space-separated tokens.
    return [ord(c) - ord("a") + 1 for c in text.split(" ")]


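# A small added sanity check for the mock itself. This class is illustrative:
# it exercises only MockTokenizer.encode as defined above, not any MaxText code.
class MockTokenizerTest(unittest.TestCase):
  """Sanity checks for the mock tokenizer used by the transform tests."""

  def test_encode_maps_letters_to_ints(self):
    tokenizer = MockTokenizer()
    self.assertEqual(tokenizer.encode("a b c"), [1, 2, 3])
    self.assertEqual(tokenizer.encode("k l m n o p q r s t"), list(range(11, 21)))

  def test_encode_empty_string_yields_no_tokens(self):
    self.assertEqual(MockTokenizer().encode(""), [])

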
class TokenizerTransformTest(unittest.TestCase):
  """Tests for chunking, trimming, and padding transformations."""

  def setUp(self):
    self.max_len = 5
    self.pad_length = 7
    self.pad_id = 0
    self.feature_names = "text"
    self.mock_tokenizer = MockTokenizer()
    self.source_data = [
        {"text": "a b c"},
        {"text": "d e f g h i j"},
        {"text": ""},
        {"text": "k l m n o p q r s t"},
    ]
    self.base_ds = grain.MapDataset.source(self.source_data).to_iter_dataset()

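  # For reference, MockTokenizer maps the four source rows to:
  #   "a b c"               -> [1, 2, 3]
  #   "d e f g h i j"       -> [4, 5, 6, 7, 8, 9, 10]
  #   ""                    -> []
  #   "k l m n o p q r s t" -> [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
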
  def test_tokenize_and_trim(self):
    """Tests the 1:1 MapTransform (truncation) logic."""
    trim_op = _grain_tokenizer.TokenizeAndTrim(
        feature_names=self.feature_names, sequence_length=self.max_len, tokenizer=self.mock_tokenizer
    )
    trim_ds = self.base_ds.map(trim_op)
    results = list(trim_ds)
    self.assertEqual(len(results), len(self.source_data))
    # Rows longer than max_len (5) are truncated; shorter rows pass through.
    expected_inputs = [
        np.array([1, 2, 3], dtype=np.int32),
        np.array([4, 5, 6, 7, 8], dtype=np.int32),
        np.array([], dtype=np.int32),
        np.array([11, 12, 13, 14, 15], dtype=np.int32),
    ]
    result_inputs = [r["text"] for r in results]
    self.assertEqual(len(result_inputs), len(expected_inputs))
    for res, exp in zip(result_inputs, expected_inputs):
      assert_array_equal(res, exp)

  def test_tokenize_and_chunk(self):
    """Tests the 1:N FlatMapTransform (chunking) logic."""
    chunk_op = _grain_tokenizer.TokenizeAndChunk(
        feature_names=self.feature_names, sequence_length=self.max_len, tokenizer=self.mock_tokenizer
    )
    chunk_ds = self.base_ds.apply(chunk_op)
    results = list(chunk_ds)
    # Chunks of at most max_len (5) per row: 1 + 2 + 0 + 2 = 5; the empty row
    # yields no chunks.
    self.assertEqual(len(results), 5)
    expected_inputs = [
        np.array([1, 2, 3], dtype=np.int32),
        np.array([4, 5, 6, 7, 8], dtype=np.int32),
        np.array([9, 10], dtype=np.int32),
        np.array([11, 12, 13, 14, 15], dtype=np.int32),
        np.array([16, 17, 18, 19, 20], dtype=np.int32),
    ]
    result_inputs = [r["text"] for r in results]
    self.assertEqual(len(result_inputs), len(expected_inputs))
    for res, exp in zip(result_inputs, expected_inputs):
      assert_array_equal(res, exp)

  def test_trim_and_pad_chaining(self):
    """Tests chaining TokenizeAndTrim.map() -> PadOrTrimToMaxLength.map()."""
    trim_op = _grain_tokenizer.TokenizeAndTrim(
        feature_names=self.feature_names, sequence_length=self.max_len, tokenizer=self.mock_tokenizer
    )
    pad_op = _input_pipeline_utils.PadOrTrimToMaxLength(max_length=self.pad_length, pad_id=self.pad_id)
    chained_ds = self.base_ds.map(trim_op).map(pad_op)
    results = list(chained_ds)
    self.assertEqual(len(results), len(self.source_data))
    # Every row is padded with pad_id (0) to pad_length (7); the empty row
    # becomes all padding.
    expected_inputs = [
        np.array([1, 2, 3, 0, 0, 0, 0], dtype=np.int32),
        np.array([4, 5, 6, 7, 8, 0, 0], dtype=np.int32),
        np.array([0, 0, 0, 0, 0, 0, 0], dtype=np.int32),
        np.array([11, 12, 13, 14, 15, 0, 0], dtype=np.int32),
    ]
    result_inputs = [r["text"] for r in results]
    self.assertEqual(len(result_inputs), len(expected_inputs))
    for res, exp in zip(result_inputs, expected_inputs):
      assert_array_equal(res, exp)

  def test_chunk_and_pad_chaining(self):
    """Tests chaining TokenizeAndChunk.apply() -> PadOrTrimToMaxLength.map()."""
    chunk_op = _grain_tokenizer.TokenizeAndChunk(
        feature_names=self.feature_names, sequence_length=self.max_len, tokenizer=self.mock_tokenizer
    )
    pad_op = _input_pipeline_utils.PadOrTrimToMaxLength(max_length=self.pad_length, pad_id=self.pad_id)
    chained_ds = self.base_ds.apply(chunk_op).map(pad_op)
    results = list(chained_ds)
    self.assertEqual(len(results), 5)
    # Same five chunks as test_tokenize_and_chunk, each padded to pad_length (7).
    expected_inputs = [
        np.array([1, 2, 3, 0, 0, 0, 0], dtype=np.int32),
        np.array([4, 5, 6, 7, 8, 0, 0], dtype=np.int32),
        np.array([9, 10, 0, 0, 0, 0, 0], dtype=np.int32),
        np.array([11, 12, 13, 14, 15, 0, 0], dtype=np.int32),
        np.array([16, 17, 18, 19, 20, 0, 0], dtype=np.int32),
    ]
    result_inputs = [r["text"] for r in results]
    self.assertEqual(len(result_inputs), len(expected_inputs))
    for res, exp in zip(result_inputs, expected_inputs):
      assert_array_equal(res, exp)


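# For context: a minimal sketch of how these transforms compose end to end,
# reusing only the ops exercised in the tests above. The helper name and
# signature are illustrative, not part of the MaxText codebase.
def build_padded_chunks(texts, tokenizer, seq_len, pad_len, pad_id):
  """Tokenizes each text, chunks to seq_len, then pads every chunk to pad_len."""
  ds = grain.MapDataset.source([{"text": t} for t in texts]).to_iter_dataset()
  chunk_op = _grain_tokenizer.TokenizeAndChunk(feature_names="text", sequence_length=seq_len, tokenizer=tokenizer)
  pad_op = _input_pipeline_utils.PadOrTrimToMaxLength(max_length=pad_len, pad_id=pad_id)
  return list(ds.apply(chunk_op).map(pad_op))

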
if __name__ == "__main__":
  unittest.main()