@@ -3666,9 +3666,11 @@ export class CLIPModel extends CLIPPreTrainedModel { }
 export class CLIPTextModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'text_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'text_model',
+            ...options,
+        });
     }
 }
 
@@ -3701,9 +3703,11 @@ export class CLIPTextModel extends CLIPPreTrainedModel {
 export class CLIPTextModelWithProjection extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'text_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'text_model',
+            ...options,
+        });
     }
 }
 
@@ -3713,9 +3717,11 @@ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel {
 export class CLIPVisionModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'vision_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'vision_model',
+            ...options,
+        });
     }
 }
 
@@ -3748,9 +3754,11 @@ export class CLIPVisionModel extends CLIPPreTrainedModel {
 export class CLIPVisionModelWithProjection extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'vision_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'vision_model',
+            ...options,
+        });
     }
 }
 //////////////////////////////////////////////////
@@ -3834,9 +3842,11 @@ export class SiglipModel extends SiglipPreTrainedModel { }
 export class SiglipTextModel extends SiglipPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'text_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'text_model',
+            ...options,
+        });
     }
 }
 
@@ -3869,9 +3879,11 @@ export class SiglipTextModel extends SiglipPreTrainedModel {
 export class SiglipVisionModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'vision_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'vision_model',
+            ...options,
+        });
     }
 }
 //////////////////////////////////////////////////
@@ -3926,18 +3938,22 @@ export class JinaCLIPModel extends JinaCLIPPreTrainedModel {
 export class JinaCLIPTextModel extends JinaCLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'text_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'text_model',
+            ...options,
+        });
     }
 }
 
 export class JinaCLIPVisionModel extends JinaCLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'vision_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'vision_model',
+            ...options,
+        });
     }
 }
 //////////////////////////////////////////////////
@@ -6159,9 +6175,11 @@ export class ClapModel extends ClapPreTrainedModel { }
 export class ClapTextModelWithProjection extends ClapPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'text_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'text_model',
+            ...options,
+        });
     }
 }
 
@@ -6194,9 +6212,11 @@ export class ClapTextModelWithProjection extends ClapPreTrainedModel {
 export class ClapAudioModelWithProjection extends ClapPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
-        // Update default model file name if not provided
-        options.model_file_name ??= 'audio_model';
-        return super.from_pretrained(pretrained_model_name_or_path, options);
+        return super.from_pretrained(pretrained_model_name_or_path, {
+            // Update default model file name if not provided
+            model_file_name: 'audio_model',
+            ...options,
+        });
     }
 }
 //////////////////////////////////////////////////
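
Every hunk above makes the same change: instead of applying the default with `options.model_file_name ??= ...`, which mutates the caller's `options` object in place, each override now passes a fresh object to `super.from_pretrained`, with the `model_file_name` default listed first and `...options` spread after it. A caller-supplied file name therefore still takes precedence, but the caller's object is left untouched. The standalone sketch below illustrates that difference in side effects; `Base`, `OldTextModel`, `NewTextModel`, and the 'any/model-id' string are hypothetical stand-ins for illustration, not part of the library.

// Minimal sketch: `Base` is a hypothetical stand-in for PreTrainedModel and does no real loading.
class Base {
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        // Pretend-load: just echo back the resolved options.
        return { pretrained_model_name_or_path, ...options };
    }
}

// Old pattern: the default is applied by mutating the caller's `options` object.
class OldTextModel extends Base {
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        options.model_file_name ??= 'text_model'; // side effect on `options`
        return super.from_pretrained(pretrained_model_name_or_path, options);
    }
}

// New pattern: the default is merged into a fresh object; `...options` is spread
// last, so a caller-supplied model_file_name still wins and `options` is untouched.
class NewTextModel extends Base {
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            model_file_name: 'text_model',
            ...options,
        });
    }
}

(async () => {
    const shared = {};
    await OldTextModel.from_pretrained('any/model-id', shared);
    console.log(shared.model_file_name);   // 'text_model' -- caller's object was mutated

    const shared2 = {};
    const loaded = await NewTextModel.from_pretrained('any/model-id', shared2);
    console.log(shared2.model_file_name);  // undefined -- no mutation
    console.log(loaded.model_file_name);   // 'text_model' -- default still applied

    const custom = await NewTextModel.from_pretrained('any/model-id', { model_file_name: 'custom' });
    console.log(custom.model_file_name);   // 'custom' -- explicit caller value wins
})();

Building the merged object inline keeps each `from_pretrained` override free of side effects on its argument, which matters when the same options object is reused across several model loads (for example, loading a text and a vision model with one shared settings object).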