-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathInversedDocumentFrequency.java
More file actions
90 lines (78 loc) · 3.46 KB
/
InversedDocumentFrequency.java
File metadata and controls
90 lines (78 loc) · 3.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
package utils;
import java.io.IOException;
import java.util.HashSet;
import java.util.Locale;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.json.JSONObject;
/**
 * Counts, for each word, the number of documents in the corpus that contain it
 * (the document-frequency component of inverse document frequency).
 */
public class InversedDocumentFrequency extends Configured implements Tool {
public static class TokenizerMapper
extends Mapper<Object, Text, Text, DoubleWritable> {
private final DoubleWritable one = new DoubleWritable(1.0);
/**
* Map function splits text into words and creates tuples (word, 1) for each unique word in document
* @param key - default key
* @param document - json doc
* @param context - store
*/
public void map(Object key, Text document, Context context) throws IOException, InterruptedException {
JSONObject json = new JSONObject(document.toString());
Text content = new Text(json.get("text").toString());
StringTokenizer words = new StringTokenizer(content.toString(), " \'\n.,!?:()[]{};\\/\"*");
// add word to hash set if it was procedeed
HashSet<String> procedeed = new HashSet<String>();
while (words.hasMoreTokens()) {
String word = words.nextToken().toLowerCase();
if (word.equals("") || procedeed.contains(word)) {
continue;
}
procedeed.add(word);
context.write(new Text(word), one);
}
}
}
public static class Reduce
extends Reducer<Text, DoubleWritable, Text, Text> {
private DoubleWritable result = new DoubleWritable();
/**
* Reduce function sums up all ones (word : 1 1 1 1) to get count of documents
* where word has been found, (e.g word: 4)
* @param key - word
* @param values - array of ones
* @param context - store
*/
public void reduce(Text key, Iterable<DoubleWritable> values,
Context context
) throws IOException, InterruptedException {
int occuredDocCount = 0;
for (DoubleWritable v : values) {
occuredDocCount++;
}
result.set(occuredDocCount);
context.write(new Text(key), new Text(","+result));
}
}
public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Job job = Job.getInstance(getConf(), "idf");
job.setJarByClass(InversedDocumentFrequency.class);
job.setMapperClass(TokenizerMapper.class);
job.setReducerClass(Reduce.class);
job.setMapOutputValueClass(DoubleWritable.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.addInputPath(job, new Path(args[1]));
FileOutputFormat.setOutputPath(job, new Path(Paths.IDF_OUT));
return job.waitForCompletion(true) ? 0 : 1;
}
}