@@ -240,28 +240,6 @@ def _offload_to_disk(self):
             os.makedirs(os.path.dirname(self.safetensors_file_path), exist_ok=True)
             tensors_to_save = {key: tensor.data.to(self.offload_device) for tensor, key in self.tensor_to_key.items()}
             safetensors.torch.save_file(tensors_to_save, self.safetensors_file_path)
-    def offload_(self):
-        r"""Offloads the group of modules to the offload_device."""
-        if self.offload_to_disk_path:
-            # TODO: we can potentially optimize this code path by checking if _all_ the desired
-            # safetensors files exist on disk and, if so, skip this step entirely, reducing IO
-            # overhead. Currently, we just check if the given `safetensors_file_path` exists and,
-            # if not, we perform a write.
-            # Check if the file has been saved in this session or if it already exists on disk.
-            if not self._is_offloaded_to_disk and not os.path.exists(self.safetensors_file_path):
-                os.makedirs(os.path.dirname(self.safetensors_file_path), exist_ok=True)
-                tensors_to_save = {
-                    key: tensor.data.to(self.offload_device) for tensor, key in self.tensor_to_key.items()
-                }
-                safetensors.torch.save_file(tensors_to_save, self.safetensors_file_path)
-
-            # The group is now considered offloaded to disk for the rest of the session.
-            self._is_offloaded_to_disk = True
-
-            # We do this to free up the RAM which is still holding up the tensor data.
-            for tensor_obj in self.tensor_to_key.keys():
-                tensor_obj.data = torch.empty_like(tensor_obj.data, device=self.offload_device)
-            return
 
         # The group is now considered offloaded to disk for the rest of the session.
         self._is_offloaded_to_disk = True
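
For reference, here is a minimal, self-contained sketch of the disk-offload flow that this hunk consolidates into `_offload_to_disk`. It assumes a `tensor_to_key` mapping from tensors to safetensors keys, as in the diff; `DiskOffloadGroup` is a hypothetical stand-in for the real group class, and anything beyond the attributes visible in the diff (`tensor_to_key`, `safetensors_file_path`, `offload_device`, `_is_offloaded_to_disk`) is illustrative only:

```python
import os

import safetensors.torch
import torch


class DiskOffloadGroup:
    """Hypothetical stand-in for the real group class; only the
    attributes that appear in the diff above are modeled here."""

    def __init__(self, tensor_to_key, safetensors_file_path, offload_device="cpu"):
        self.tensor_to_key = tensor_to_key  # maps tensor -> safetensors key
        self.safetensors_file_path = safetensors_file_path
        self.offload_device = offload_device
        self._is_offloaded_to_disk = False

    def _offload_to_disk(self):
        # Skip the write if this session already offloaded the group, or if the
        # file survives from a previous run. (The TODO in the diff would extend
        # this check to _all_ expected files rather than just this one.)
        if not self._is_offloaded_to_disk and not os.path.exists(self.safetensors_file_path):
            os.makedirs(os.path.dirname(self.safetensors_file_path), exist_ok=True)
            tensors_to_save = {
                key: tensor.data.to(self.offload_device)
                for tensor, key in self.tensor_to_key.items()
            }
            safetensors.torch.save_file(tensors_to_save, self.safetensors_file_path)

        # The group is considered offloaded to disk for the rest of the session.
        self._is_offloaded_to_disk = True

        # Replace each tensor's storage with an empty placeholder so the RAM
        # still holding the original data can be freed.
        for tensor_obj in self.tensor_to_key:
            tensor_obj.data = torch.empty_like(tensor_obj.data, device=self.offload_device)
```

Under these assumptions, calling `_offload_to_disk()` repeatedly within one session performs the safetensors write at most once; the TODO above would further skip the write whenever every expected file already exists on disk.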