Skip to content

Commit c596c2c

Browse files
authored
Merge pull request #108 from merciajeno/main
Clustering movie summaries added
2 parents b087e8b + f127f3d commit c596c2c

File tree

1 file changed

+99
-0
lines changed

1 file changed

+99
-0
lines changed

backlog/ClusteringOfIMDBMovies.py

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
import requests
2+
from bs4 import BeautifulSoup
3+
from sklearn.feature_extraction.text import TfidfVectorizer
4+
from sklearn.cluster import KMeans
5+
from sklearn.decomposition import PCA
6+
from sklearn.metrics import silhouette_score
7+
import matplotlib.pyplot as plt
8+
import warnings
9+
warnings.filterwarnings("ignore")
10+
11+
def scrape_imdb_top_movies(num_movies):
    """Scrape titles and plot summaries from the IMDb Top 250 chart.

    Fetches the chart page, collects the title links, then requests each
    movie page and extracts its plot summary.

    Args:
        num_movies: How many movies (from the top of the chart) to fetch
            summaries for. Capped at the number of chart links found.

    Returns:
        A (titles, summaries) tuple of two equal-length lists.

    Raises:
        requests.HTTPError: If the chart page request fails.
    """
    # A browser-like User-Agent: IMDb blocks the default requests UA.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
    }
    response = requests.get('https://www.imdb.com/chart/top/?ref_=login', headers=headers)
    # Fail fast instead of silently parsing an error page into zero links.
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')
    movie_link = soup.find_all('a', {"class": "ipc-title-link-wrapper"})

    hrefs = []
    movie_titles = []
    for movie in movie_link:
        text = movie.text
        # Chart entries are numbered ("1. The Shawshank Redemption");
        # other anchors with the same class are filtered out. Guard the
        # empty string so text[0] cannot raise.
        if text and text[0].isdigit():
            movie_titles.append(text)
            hrefs.append(movie.get("href"))

    # Never index past the number of links actually scraped.
    num_movies = min(num_movies, len(hrefs))

    summaries = []
    for index in range(num_movies):
        url = "https://www.imdb.com" + hrefs[index]
        print(f"Fetching summary for: {movie_titles[index]}")
        r = requests.get(url, headers=headers)
        url_soup = BeautifulSoup(r.content, 'html.parser')
        # Single lookup (the original called find() twice per movie).
        plot = url_soup.find('span', {'data-testid': 'plot-l'})
        summaries.append(plot.text if plot else "No summary available")

    return movie_titles[:num_movies], summaries
38+
39+
40+
# Number of chart entries to fetch and cluster.
num_movies = 250
movie_titles, summaries = scrape_imdb_top_movies(num_movies)

# Vectorize each plot summary with TF-IDF; English stop words are dropped
# so common function words do not dominate the similarity structure.
vectorizer = TfidfVectorizer(stop_words='english')
tfidf_matrix = vectorizer.fit_transform(summaries)

# Project the TF-IDF vectors down to 2 components so the clusters can be
# plotted; .toarray() densifies the sparse matrix, which PCA requires.
pca = PCA(n_components=2)
tfidf_pca = pca.fit_transform(tfidf_matrix.toarray())
50+
51+
52+
# Elbow Method: plot within-cluster sum of squares (inertia) against k so
# the "elbow" — the point of diminishing returns — can be read off visually.
sum_of_squared_distances = []
# Candidate k values, capped so k never exceeds the number of samples.
K = range(2, min(10, num_movies))
for k in K:
    # Explicit n_init/random_state: the defaults changed in scikit-learn
    # 1.4 and an unseeded KMeans makes the elbow plot non-reproducible.
    km = KMeans(n_clusters=k, n_init=10, random_state=42)
    km.fit(tfidf_pca)
    sum_of_squared_distances.append(km.inertia_)

plt.figure(figsize=(10, 7))
plt.plot(K, sum_of_squared_distances, 'bx-')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Sum of squared distances')
plt.title('Elbow Method for Optimal k')
plt.show()
66+
67+
68+
# Silhouette analysis: higher average silhouette score means tighter,
# better-separated clusters — a second opinion alongside the elbow plot.
silhouette_avg = []
# Same candidate range as the elbow method, computed once and reused in
# both the loop and the plot (the original rebuilt it in each place).
k_values = range(2, min(10, num_movies))
for k in k_values:
    # Seeded for reproducibility; explicit n_init silences the
    # scikit-learn >= 1.2 deprecation warning about the changed default.
    kmeans = KMeans(n_clusters=k, n_init=10, random_state=42)
    labels = kmeans.fit_predict(tfidf_pca)
    silhouette_avg.append(silhouette_score(tfidf_pca, labels))

plt.figure(figsize=(10, 7))
plt.plot(k_values, silhouette_avg, 'bx-')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Silhouette Score')
plt.title('Silhouette Score for Optimal k')
plt.show()
81+
82+
# Final clustering with the k chosen by inspecting the elbow and
# silhouette plots above.
optimal_k = 5
# Seeded so the cluster assignments (and the figure) are reproducible
# across runs; explicit n_init matches the pre-1.4 scikit-learn default.
kmeans = KMeans(n_clusters=optimal_k, n_init=10, random_state=42)
labels = kmeans.fit_predict(tfidf_pca)

# Scatter each cluster in the 2-D PCA space with its own color/label.
plt.figure(figsize=(10, 7))
for i in range(optimal_k):
    points = tfidf_pca[labels == i]
    plt.scatter(points[:, 0], points[:, 1], label=f'Cluster {i}')

plt.xlabel('PCA Component 1')
plt.ylabel('PCA Component 2')
plt.title('K-means Clustering of IMDb Movie Summaries (after PCA)')
plt.legend()
plt.show()

0 commit comments

Comments
 (0)