@@ -22,19 +22,19 @@ npm install website-scraper
 
 ## Usage
 ```javascript
-var scraper = require('website-scraper');
+var scrape = require('website-scraper');
 var options = {
   urls: ['http://nodejs.org/'],
   directory: '/path/to/save/',
 };
 
 // with callback
-scraper.scrape(options, function (error, result) {
+scrape(options, function (error, result) {
   /* some code here */
 });
 
 // or with promise
-scraper.scrape(options).then(function (result) {
+scrape(options).then(function (result) {
   /* some code here */
 });
 ```
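
With this change the module exports the scraping function directly, so there is no longer a `scraper.scrape(...)` call. A minimal sketch of the promise form with basic error handling follows; the `.catch` handler and the logged messages are illustrative additions, and the exact shape of `result` is not shown in this diff:

```javascript
var scrape = require('website-scraper');

scrape({
  urls: ['http://nodejs.org/'],
  directory: '/path/to/save/',
}).then(function (result) {
  // the resolved value corresponds to `result` in the callback form
  console.log('scraping finished', result);
}).catch(function (error) {
  // a rejected promise corresponds to `error` in the callback form
  console.error('scraping failed', error);
});
```
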
@@ -98,8 +98,8 @@ and separate files into directories:
   - `css` for .css (full path `/path/to/save/css`)
 
 ```javascript
-var scraper = require('website-scraper');
-scraper.scrape({
+var scrape = require('website-scraper');
+scrape({
   urls: [
     'http://nodejs.org/', // Will be saved with default filename 'index.html'
     {url: 'http://nodejs.org/about', filename: 'about.html'},
@@ -132,8 +132,8 @@ scraper.scrape({
 ```javascript
 // Links from example.com will be followed
 // Links found on those pages will be ignored because their depth = 2 is greater than maxDepth
-var scraper = require('website-scraper');
-scraper.scrape({
+var scrape = require('website-scraper');
+scrape({
   urls: ['http://example.com/'],
   directory: '/path/to/save',
   recursive: true,
@@ -144,8 +144,8 @@ scraper.scrape({
 #### Example 3. Filtering out external resources
 ```javascript
 // Links to other websites are filtered out by the urlFilter
-var scraper = require('website-scraper');
-scraper.scrape({
+var scrape = require('website-scraper');
+scrape({
   urls: ['http://example.com/'],
   urlFilter: function (url) {
     return url.indexOf('http://example.com') === 0;
@@ -159,8 +159,8 @@ scraper.scrape({
 // Downloads all the crawlable files of example.com.
 // The files are saved in the same structure as the structure of the website, by using the `bySiteStructure` filenameGenerator.
 // Links to other websites are filtered out by the urlFilter
-var scraper = require('website-scraper');
-scraper.scrape({
+var scrape = require('website-scraper');
+scrape({
   urls: ['http://example.com/'],
   urlFilter: function (url) {
     return url.indexOf('http://example.com') === 0;