
Commit 9dac96e

Switch tf -> numpy in more docstrings (#1123)

1 parent: cd4dd0a
34 files changed: +113 additions, -177 deletions
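The change follows one mechanical pattern across the touched files: docstring inputs built with `tf.ones`/`tf.constant` become `np.ones`/`np.array`, the explicit `shape=` argument is replaced by a nested-list literal that already carries the batch dimension, and `int64` token ids become `int32`. A minimal, illustrative sketch of that mapping (not part of the commit):

```python
import numpy as np

# Before: tf.ones(shape=(1, 12), dtype="int64")
token_ids = np.ones(shape=(1, 12), dtype="int32")

# Before: tf.constant([1, 1, 1, 0], shape=(1, 4))
# The nested list supplies the leading batch dimension directly.
padding_mask = np.array([[1, 1, 1, 0]])

print(token_ids.shape, padding_mask.shape)  # (1, 12) (1, 4)
```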

keras_nlp/models/albert/albert_backbone.py

Lines changed: 3 additions & 7 deletions
@@ -76,13 +76,9 @@ class AlbertBackbone(Backbone):
     Examples:
     ```python
     input_data = {
-        "token_ids": tf.ones(shape=(1, 12), dtype="int64"),
-        "segment_ids": tf.constant(
-            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
-        ),
-        "padding_mask": tf.constant(
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
-        ),
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
     }

     # Randomly initialized ALBERT encoder
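As context for the hunk above, the updated ALBERT example would be exercised roughly as follows. This is a sketch, not part of the diff: the preset name and the printed output key are assumptions based on the KerasNLP API.

```python
import numpy as np
import keras_nlp

input_data = {
    "token_ids": np.ones(shape=(1, 12), dtype="int32"),
    "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
    "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
}

# Preset name is an assumption; any pretrained ALBERT preset would do.
backbone = keras_nlp.models.AlbertBackbone.from_preset("albert_base_en_uncased")
outputs = backbone(input_data)
print(outputs["sequence_output"].shape)  # (1, 12, hidden_dim)
```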

keras_nlp/models/albert/albert_classifier.py

Lines changed: 3 additions & 7 deletions
@@ -85,13 +85,9 @@ class AlbertClassifier(Task):
     Preprocessed integer data.
     ```python
     features = {
-        "token_ids": tf.ones(shape=(2, 12), dtype="int64"),
-        "segment_ids": tf.constant(
-            [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
+        "token_ids": np.ones(shape=(2, 12), dtype="int32"),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
     }
     labels = [0, 3]
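A hedged sketch of how the preprocessed classifier features above would be fed to `fit()`; the preset name, `num_classes`, and `preprocessor=None` are assumptions not shown in this hunk.

```python
import numpy as np
import keras_nlp

features = {
    "token_ids": np.ones(shape=(2, 12), dtype="int32"),
    "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2),
    "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
}
labels = [0, 3]

# Preset name, num_classes, and preprocessor=None are assumptions;
# preprocessing is skipped because the inputs are already tokenized.
classifier = keras_nlp.models.AlbertClassifier.from_preset(
    "albert_base_en_uncased",
    num_classes=4,
    preprocessor=None,
)
classifier.fit(x=features, y=labels, batch_size=2)
```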

keras_nlp/models/albert/albert_masked_lm.py

Lines changed: 4 additions & 8 deletions
@@ -81,14 +81,10 @@ class AlbertMaskedLM(Task):
     ```python
     # Create preprocessed batch where 0 is the mask token.
     features = {
-        "token_ids": tf.constant(
-            [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)
-        ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1]] * 2, shape=(2, 8)
-        ),
-        "mask_positions": tf.constant([[2, 4]] * 2, shape=(2, 2)),
-        "segment_ids": tf.constant([[0, 0, 0, 0, 0, 0, 0, 0]] * 2, shape=(2, 8))
+        "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
+        "mask_positions": np.array([[2, 4]] * 2),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 0, 0, 0]] * 2),
     }
     # Labels are the original masked values.
     labels = [[3, 5]] * 2
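Similarly, a sketch of training on the masked LM batch above; again the preset name and `preprocessor=None` are assumptions outside this diff.

```python
import numpy as np
import keras_nlp

features = {
    "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
    "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
    "mask_positions": np.array([[2, 4]] * 2),
    "segment_ids": np.array([[0, 0, 0, 0, 0, 0, 0, 0]] * 2),
}
# Labels are the original masked values.
labels = [[3, 5]] * 2

# Preset name and preprocessor=None are assumptions outside this diff.
masked_lm = keras_nlp.models.AlbertMaskedLM.from_preset(
    "albert_base_en_uncased",
    preprocessor=None,
)
masked_lm.fit(x=features, y=labels, batch_size=2)
```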

keras_nlp/models/bart/bart_backbone.py

Lines changed: 6 additions & 6 deletions
@@ -65,13 +65,13 @@ class BartBackbone(Backbone):
     Examples:
     ```python
     input_data = {
-        "encoder_token_ids": tf.ones(shape=(1, 12), dtype="int64"),
-        "encoder_padding_mask": tf.constant(
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
+        "encoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "encoder_padding_mask": np.array(
+            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]
         ),
-        "decoder_token_ids": tf.ones(shape=(1, 12), dtype="int64"),
-        "decoder_padding_mask": tf.constant(
-            [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], shape=(1, 12)
+        "decoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "decoder_padding_mask": np.array(
+            [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]
         ),
     }

keras_nlp/models/bart/bart_seq_2_seq_lm.py

Lines changed: 10 additions & 10 deletions
@@ -109,12 +109,12 @@ class BartSeq2SeqLM(GenerativeTask):
     # "The quick brown fox", and the decoder inputs to "The fast". Use
     # `"padding_mask"` to indicate values that should not be overridden.
     prompt = {
-        "encoder_token_ids": tf.constant([[0, 133, 2119, 6219, 23602, 2, 1, 1]]),
-        "encoder_padding_mask": tf.constant(
+        "encoder_token_ids": np.array([[0, 133, 2119, 6219, 23602, 2, 1, 1]]),
+        "encoder_padding_mask": np.array(
             [[True, True, True, True, True, True, False, False]]
         ),
-        "decoder_token_ids": tf.constant([[2, 0, 133, 1769, 2, 1, 1]]),
-        "decoder_padding_mask": tf.constant([[True, True, True, True, False, False]])
+        "decoder_token_ids": np.array([[2, 0, 133, 1769, 2, 1, 1]]),
+        "decoder_padding_mask": np.array([[True, True, True, True, False, False]])
     }

     bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
@@ -137,13 +137,13 @@ class BartSeq2SeqLM(GenerativeTask):
     Call `fit()` without preprocessing.
     ```python
     x = {
-        "encoder_token_ids": tf.constant([[0, 133, 2119, 2, 1]] * 2),
-        "encoder_padding_mask": tf.constant([[1, 1, 1, 1, 0]] * 2),
-        "decoder_token_ids": tf.constant([[2, 0, 133, 1769, 2]] * 2),
-        "decoder_padding_mask": tf.constant([[1, 1, 1, 1, 1]] * 2),
+        "encoder_token_ids": np.array([[0, 133, 2119, 2, 1]] * 2),
+        "encoder_padding_mask": np.array([[1, 1, 1, 1, 0]] * 2),
+        "decoder_token_ids": np.array([[2, 0, 133, 1769, 2]] * 2),
+        "decoder_padding_mask": np.array([[1, 1, 1, 1, 1]] * 2),
     }
-    y = tf.constant([[0, 133, 1769, 2, 1]] * 2)
-    sw = tf.constant([[1, 1, 1, 1, 0]] * 2)
+    y = np.array([[0, 133, 1769, 2, 1]] * 2)
+    sw = np.array([[1, 1, 1, 1, 0]] * 2)

     bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
         "bart_base_en",

keras_nlp/models/bert/bert_backbone.py

Lines changed: 3 additions & 7 deletions
@@ -65,13 +65,9 @@ class BertBackbone(Backbone):
     Examples:
     ```python
     input_data = {
-        "token_ids": tf.ones(shape=(1, 12), dtype="int64"),
-        "segment_ids": tf.constant(
-            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
-        ),
-        "padding_mask": tf.constant(
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
-        ),
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
     }

     # Pretrained BERT encoder.

keras_nlp/models/bert/bert_classifier.py

Lines changed: 3 additions & 7 deletions
@@ -86,13 +86,9 @@ class BertClassifier(Task):
     Preprocessed integer data.
     ```python
     features = {
-        "token_ids": tf.ones(shape=(2, 12), dtype="int64"),
-        "segment_ids": tf.constant(
-            [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
+        "token_ids": np.ones(shape=(2, 12), dtype="int32"),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
     }
     labels = [0, 3]

keras_nlp/models/bert/bert_masked_lm.py

Lines changed: 4 additions & 8 deletions
@@ -80,14 +80,10 @@ class BertMaskedLM(Task):
     ```python
     # Create preprocessed batch where 0 is the mask token.
     features = {
-        "token_ids": tf.constant(
-            [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)
-        ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1]] * 2, shape=(2, 8)
-        ),
-        "mask_positions": tf.constant([[2, 4]] * 2, shape=(2, 2)),
-        "segment_ids": tf.constant([[0, 0, 0, 0, 0, 0, 0, 0]] * 2, shape=(2, 8))
+        "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
+        "mask_positions": np.array([[2, 4]] * 2),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 0, 0, 0]] * 2)
     }
     # Labels are the original masked values.
     labels = [[3, 5]] * 2

keras_nlp/models/deberta_v3/deberta_v3_backbone.py

Lines changed: 2 additions & 3 deletions
@@ -73,9 +73,8 @@ class DebertaV3Backbone(Backbone):
     Example usage:
     ```python
     input_data = {
-        "token_ids": tf.ones(shape=(1, 12), dtype="int64"),
-        "padding_mask": tf.constant(
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)),
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
     }

     # Pretrained DeBERTa encoder.

keras_nlp/models/deberta_v3/deberta_v3_classifier.py

Lines changed: 2 additions & 4 deletions
@@ -95,10 +95,8 @@ class DebertaV3Classifier(Task):
     Preprocessed integer data.
     ```python
     features = {
-        "token_ids": tf.ones(shape=(2, 12), dtype="int64"),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
+        "token_ids": np.ones(shape=(2, 12), dtype="int32"),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
     }
     labels = [0, 3]
