forked from skmdabdullah/Big-Data-Hadoop-Production-Scripts
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: spark apps
More file actions
84 lines (36 loc) · 1.68 KB
/
spark apps
File metadata and controls
84 lines (36 loc) · 1.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
############## Spark on Cloudera #############
--*** Testing Spark ***---
# Upload the local 'poems' data into HDFS under /input so Spark can read it
hdfs dfs -put poems /input
# Use the Spark binaries shipped with the CDH parcel
cd /opt/cloudera/parcels/CDH/lib/spark/bin/
# Start the interactive Scala REPL; the next three lines are typed into it
spark-shell
# Word count in Scala: read every file under /input, split lines on spaces,
# emit (word, 1) pairs, then sum the counts per word with reduceByKey
val myfile = sc.textFile("hdfs://ip-172-31-9-22.us-west-2.compute.internal:8020/input/")
val counts = myfile.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
counts.saveAsTextFile("hdfs://ip-172-31-9-22.us-west-2.compute.internal:8020/out1/")
# Same word count typed into the Python REPL; results land in a separate
# HDFS output dir (/out2) so the two runs can be compared
pyspark
myfile = sc.textFile("hdfs://ip-172-31-9-22.us-west-2.compute.internal:8020/input/")
counts = myfile.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda v1,v2: v1 + v2)
counts.saveAsTextFile("hdfs://ip-172-31-9-22.us-west-2.compute.internal:8020/out2/")
# SparkPi example on YARN with the driver on the local machine (client mode);
# trailing "10" is the number of partitions/slices for the Pi estimate
spark-submit --class org.apache.spark.examples.SparkPi --master yarn \
--deploy-mode client /opt/cloudera/parcels/CDH/lib/spark/lib/spark-examples.jar 10
# Same job with the driver running inside the YARN application master (cluster mode)
spark-submit --class org.apache.spark.examples.SparkPi --master yarn \
--deploy-mode cluster /opt/cloudera/parcels/CDH/lib/spark/lib/spark-examples.jar 10
--*** Checking Configuration ***--
# Search the Spark service configuration (presumably the Cloudera Manager
# config search box — confirm) for the dynamic-allocation flag:
In spark - search
spark.dynamicAllocation.enabled
#############################
---**** Hive on Spark ****---
#############################
## Check whether Hive on Spark is enabled (Hive service configuration search)
In Hive - search - Spark On YARN Service
## Check the default execution engine for Hive queries
In Hive - search - Default Execution Engine
## Connect to Hue and run the following in a Hive editor session
--> Determine the current execution engine (mr / spark / tez)
set hive.execution.engine;
--> Switch this session to Spark, then confirm the change took effect
set hive.execution.engine=spark;
set hive.execution.engine;
## Fire some queries to exercise the Spark engine
create table mytable (a int,b string);
## FIX: original omitted the VALUES keyword, which is a HiveQL syntax error
insert into table mytable values (1,'abcd');
(check the info logs for spark details )