-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathtask8.py
More file actions
159 lines (132 loc) · 4.59 KB
/
task8.py
File metadata and controls
159 lines (132 loc) · 4.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
# import os,json,requests,task1
# from bs4 import BeautifulSoup
# from pprint import pprint
# def movie_details(link0):
# # if os.path.exists("/home/navgurukul/Desktop/rAju/data/aanand.json"):
# # with open("/home/navgurukul/Desktop/rAju/data/aanand.json") as file:
# page=requests.get(link0).text
# # data0=file.read()
# genre_lst=[]
# director_lst=[]
# data=BeautifulSoup(page,"html.parser")
# name=data.find(class_="title_wrapper").h1.text
# timeClass=data.find(class_="title_wrapper")
# time=(timeClass.find(class_="subtext").time.text).strip()
# genre_class=timeClass.find(class_="subtext")
# genre_a=genre_class.find_all("a")
# for i in range(len(genre_a)-1):
# genre_lst.append(genre_a[i].text)
# bio=data.find(class_="summary_text").text
# director_class=data.find(class_="credit_summary_item")
# director_a=director_class.find_all("a")
# for i in range(len(director_a)):
# director_lst.append(director_a[i].text)
# link=data.find(class_="poster").img["src"]
# exta_details=data.find("div",attrs={"class":"article","id":"titleDetails"})
# txt_div=exta_details.find_all("div",class_="txt-block")
# count=0
# for div in txt_div:
# if count==2:
# break
# elif div.h4.text== "Country:":
# country_all= div.find_all("a")
# country=[a.text for a in country_all]
# count+=1
# elif div.h4.text=="Language:":
# language_all=div.find_all("a")
# language=[a.text for a in language_all]
# count+=1
# # movie_details_list=[]
# s={}
# s["name"]=name[:-8]
# s["country"]="india"
# s["genre"]=genre_lst
# s["running_time"]=time
# s["minfo"]=bio.strip()
# s["director"]=director_lst
# s["poster_link"]=link
# s["language"]=language
# s["country"]=country
# # return s
# with open("/home/navgurukul/Desktop/rAju/data/task0/"+link0[-10:-1]+".json","w") as page:
# page.write(json.dumps(s))
# # else:
# # data0=requests.get(link).text
# # with open("/home/navgurukul/Desktop/rAju/data/aanand.json","w") as file:
# # file.write(data0)
# movies_list=task1.top_250movies()
# # movie_details_list=[]
# for movie in movies_list[5:]:
# x=movie["link"]
# movie_details(x)
# movie_details_list.append(movie_details(x))
# pprint(movie_details_list)
########################################################33
import os,json,requests,time,random,task1
from bs4 import BeautifulSoup
from pprint import pprint
def _runtime_minutes(text):
    """Convert an IMDb runtime string like "2h 18min", "2h", or "45min" to total minutes."""
    minutes = 0
    for part in text.split():
        if part.endswith("min"):
            minutes += int(part[:-3])
        elif part.endswith("h"):
            minutes += int(part[:-1]) * 60
    return minutes


def movie_detailsLst(movies):
    """Return a list of detail dicts for *movies*, scraping each IMDb title page.

    movies: iterable of dicts, each with a "link" key holding an IMDb title URL.
    Results are cached as JSON files under the task0 data directory; a cached
    entry is loaded from disk instead of re-fetching the page.
    Every element of the returned list is a dict (the original inconsistently
    appended JSON strings for freshly scraped titles).
    """
    cache_dir = "/home/navgurukul/Desktop/rAju/data/task0/"
    movie_details_list = []
    for movie in movies:
        link0 = movie["link"]
        # link0[-10:-1] extracts the IMDb title id (e.g. "tt0111161/") minus
        # the trailing slash — used as the cache file name.
        cache_path = cache_dir + link0[-10:-1] + ".json"
        if os.path.exists(cache_path):
            with open(cache_path) as file:
                movie_details_list.append(json.loads(file.read()))
            continue
        # Polite random delay between live requests to avoid hammering IMDb.
        # (Local name is runtime_text, not "time", so the time module is not
        # shadowed — the original needed a re-import inside the loop.)
        time.sleep(random.randint(1, 3))
        page = requests.get(link0).text
        data = BeautifulSoup(page, "html.parser")
        wrapper = data.find(class_="title_wrapper")
        name = wrapper.h1.text
        subtext = wrapper.find(class_="subtext")
        runtime_text = subtext.time.text.strip()
        # The last <a> in the subtext is the release date, not a genre.
        genre_lst = [a.text for a in subtext.find_all("a")[:-1]]
        bio = data.find(class_="summary_text").text
        director_lst = [a.text for a in
                        data.find(class_="credit_summary_item").find_all("a")]
        poster_link = data.find(class_="poster").img["src"]
        extra_details = data.find("div", attrs={"class": "article", "id": "titleDetails"})
        # Default to empty lists so missing "Country:"/"Language:" headings
        # no longer raise NameError when building the dict below.
        country = []
        language = []
        found = 0
        for div in extra_details.find_all("div", class_="txt-block"):
            if found == 2:
                break
            heading = div.h4.text if div.h4 else ""
            if heading == "Country:":
                country = [a.text for a in div.find_all("a")]
                found += 1
            elif heading == "Language:":
                language = [a.text for a in div.find_all("a")]
                found += 1
        s = {}
        s["name"] = name[:-8]  # strip the trailing " (YYYY) " year suffix
        s["genre"] = genre_lst
        # Proper parse fixes the two-digit-minutes bug: "2h 18min" is 138,
        # not 121 as int(time[0])*60 + int(time[3]) produced.
        s["running_time"] = _runtime_minutes(runtime_text)
        s["minfo"] = bio.strip()
        s["director"] = director_lst
        s["poster_link"] = poster_link
        s["language"] = language
        s["country"] = country
        # Append the dict itself so cached and freshly scraped entries have
        # the same shape in the returned list.
        movie_details_list.append(s)
        with open(cache_path, "w") as file:
            file.write(json.dumps(s))
    return movie_details_list
# else:
# data0=requests.get(link).text
# with open("/home/navgurukul/Desktop/rAju/data/aanand.json","w") as file:
# file.write(data0)
# moviesLst=task1.top_250movies()
# pprint(movie_detailsLst(moviesLst))