
Commit 81c8e1e

fix(clip): transformer missing one layer
Signed-off-by: YdrMaster <[email protected]>
1 parent: 7ec259a

2 files changed (+2, -2 lines):
  models/clip/common/src/compute.rs
  models/clip/common/src/storage.rs

models/clip/common/src/compute.rs

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ where
             self.layer_norm(&mut x, &inplace, wb, workspace, queue_alloc)?
         }
 
-        for iblk in 0..nblk {
+        for iblk in 0..=nblk {
            {
                let wb = self.weights.attn_norm(iblk, queue);
                self.layer_norm(&mut x1, &x, wb, workspace, queue_alloc)?;
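
The one-character change switches from Rust's exclusive range to its inclusive range: `0..nblk` yields nblk iterations, while `0..=nblk` yields nblk + 1, which is the extra transformer layer the commit message refers to. A minimal sketch of the difference, using a made-up block count rather than the model's real metadata:

    fn main() {
        // Hypothetical block count; in the real code this comes from the model metadata.
        let nblk: usize = 24;

        // Exclusive range: indices 0..23, i.e. nblk iterations.
        assert_eq!((0..nblk).count(), nblk);

        // Inclusive range: indices 0..=24, i.e. nblk + 1 iterations,
        // covering one more transformer block than before.
        assert_eq!((0..=nblk).count(), nblk + 1);
    }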

models/clip/common/src/storage.rs

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ impl<'a> Storage<&'a [u8]> {
             projector : ProjectorMeta::from_gguf(gguf),
         };
         #[rustfmt::skip]
-        let blocks = (0..meta.nblk)
+        let blocks = (0..=meta.nblk)
             .map(|i| BlkStorage {
                 attn_norm_w: tensor![gguf => format!("v.blk.{i}.ln1.weight" )].data,
                 attn_norm_b: tensor![gguf => format!("v.blk.{i}.ln1.bias"   )].data,
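
The same range fix is applied in the weight loader so that the number of loaded blocks matches the number of blocks the compute loop iterates over; otherwise the last iteration would index past the end of the `blocks` vector. A rough sketch of that invariant, with a stand-in struct and dummy data instead of the GGUF tensors:

    // Stand-in for the per-block weight storage; the real BlkStorage holds GGUF tensor data.
    struct BlkStorage {
        attn_norm_w: Vec<f32>,
        attn_norm_b: Vec<f32>,
    }

    fn main() {
        let nblk: usize = 24;

        // Loader side: build one entry per block, inclusive of the last index.
        let blocks: Vec<BlkStorage> = (0..=nblk)
            .map(|_i| BlkStorage {
                attn_norm_w: vec![0.0; 16], // dummy values in place of v.blk.{i}.ln1.weight
                attn_norm_b: vec![0.0; 16], // dummy values in place of v.blk.{i}.ln1.bias
            })
            .collect();

        // Compute side: the loop in compute.rs visits iblk in 0..=nblk,
        // so the vector must contain nblk + 1 entries.
        for iblk in 0..=nblk {
            let _wb = (&blocks[iblk].attn_norm_w, &blocks[iblk].attn_norm_b);
        }
        assert_eq!(blocks.len(), nblk + 1);
    }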
