From fbd56132feb46f1015858b704e2af5ea716ecff0 Mon Sep 17 00:00:00 2001
From: GigiHB
Date: Tue, 8 Apr 2025 21:46:57 -0600
Subject: [PATCH 1/2] PR

---
 PR.py  | 30 ++++++++++++++++++++++++++++++
 ch1.py | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)
 create mode 100644 PR.py
 create mode 100644 ch1.py

diff --git a/PR.py b/PR.py
new file mode 100644
index 0000000..8bcfe47
--- /dev/null
+++ b/PR.py
@@ -0,0 +1,30 @@
+PS C:\Users\gisel\desktop\challenge_1> git add challenge1.py
+warning: in the working copy of 'challenge1.py', LF will be replaced by CRLF the next time Git touches it
+PS C:\Users\gisel\desktop\challenge_1> git status
+On branch cambios_en_read_me
+Changes to be committed:
+  (use "git restore --staged <file>..." to unstage)
+        new file:   challenge1.py
+
+Changes not staged for commit:
+  (use "git add <file>..." to update what will be committed)
+  (use "git restore <file>..." to discard changes in working directory)
+        modified:   README.md
+
+PS C:\Users\gisel\desktop\challenge_1> git commit m- "cambios en readme y agregar archivo en nueva branch"
+error: pathspec 'm-' did not match any file(s) known to git
+error: pathspec 'cambios en readme y agregar archivo en nueva branch' did not match any file(s) known to git
+PS C:\Users\gisel\desktop\challenge_1> git commit -m "cambios en readme y archivo nuevo"
+[cambios_en_read_me 80695c0] cambios en readme y archivo nuevo
+ 1 file changed, 20 insertions(+)
+ create mode 100644 challenge1.py
+PS C:\Users\gisel\desktop\challenge_1> git push origin cambios_en_read_me
+Enumerating objects: 4, done.
+Counting objects: 100% (4/4), done.
+Delta compression using up to 16 threads
+Compressing objects: 100% (3/3), done.
+Writing objects: 100% (3/3), 536 bytes | 268.00 KiB/s, done.
+Total 3 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)
+To https://github.com/mundo-python/challenge_1
+   22acf23..80695c0  cambios_en_read_me -> cambios_en_read_me
+PS C:\Users\gisel\desktop\challenge_1>
\ No newline at end of file
diff --git a/ch1.py b/ch1.py
new file mode 100644
index 0000000..3f9fb98
--- /dev/null
+++ b/ch1.py
@@ -0,0 +1,43 @@
+import requests
+
+
+url = 'https://www.theverge.com/rss/index.xml'
+r = requests.get(url)
+
+if r.status_code == 200:
+    xml_content = r.text
+    print(xml_content)
+else:
+    print("error")
+
+# The first 10 items
+
+import feedparser
+feed = feedparser.parse(url)
+
+items = feed.entries[:10]
+
+for i, item in enumerate(items, start=1):
+    title = item.title
+    pub_date = item.published
+    link = item.link
+
+    print(f"{i}. Title: {title}")
+    print(f"Date: {pub_date}")
+    print(f"Link: {link}")
+
+# Save the items to a JSON file
+import json
+data = []
+for item in items:
+    lista = {
+        "title": item.title,
+        "published": item.published,
+        "link": item.link
+    }
+    data.append(lista)
+
+
+with open("10items.json", "w", encoding="utf-8") as f:
+    json.dump(data, f, ensure_ascii=False, indent=4)
+
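A note on the ch1.py added above: it reads item.title, item.published, and item.link as attributes, which raises AttributeError if a feed omits one of those fields. feedparser entries are dict-like (FeedParserDict subclasses dict), so a more defensive loop can use .get() with fallbacks. This is a sketch, not part of the patch, and the fallback strings are assumptions:

import feedparser

# Sketch: same 10-item loop as ch1.py, but tolerant of missing fields.
# FeedParserDict supports dict-style .get(), so a feed without, say,
# a publication date yields the fallback string instead of raising.
feed = feedparser.parse('https://www.theverge.com/rss/index.xml')
for i, item in enumerate(feed.entries[:10], start=1):
    title = item.get('title', 'untitled')        # fallback values are
    pub_date = item.get('published', 'no date')  # illustrative assumptions
    link = item.get('link', '')
    print(f"{i}. Title: {title}")
    print(f"Date: {pub_date}")
    print(f"Link: {link}")
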
From a38ceaf042f89600028f421da67f28694daa51ee Mon Sep 17 00:00:00 2001
From: GigiHB
Date: Tue, 8 Apr 2025 22:03:34 -0600
Subject: [PATCH 2/2] corrections

---
 ch1.py        | 12 ++++--------
 challenge1.py | 20 --------------------
 2 files changed, 4 insertions(+), 28 deletions(-)
 delete mode 100644 challenge1.py

diff --git a/ch1.py b/ch1.py
index 3f9fb98..87ec68c 100644
--- a/ch1.py
+++ b/ch1.py
@@ -1,18 +1,15 @@
 import requests
-
+import feedparser
+import json

 url = 'https://www.theverge.com/rss/index.xml'
 r = requests.get(url)

-if r.status_code == 200:
-    xml_content = r.text
-    print(xml_content)
-else:
-    print("error")
+if r.status_code != 200:
+    print(f"error with status code {r.status_code}")

 # The first 10 items

-import feedparser
 feed = feedparser.parse(url)

 items = feed.entries[:10]
@@ -27,7 +24,6 @@
     print(f"Link: {link}")

 # Save the items to a JSON file
-import json
 data = []
 for item in items:
     lista = {
diff --git a/challenge1.py b/challenge1.py
deleted file mode 100644
index 4a1fa2a..0000000
--- a/challenge1.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import requests
-from bs4 import BeautifulSoup
-
-url = f'https://www.theverge.com/rss/index.xml'
-r= requests.get(url)
-
-
-soup = BeautifulSoup(r.text, 'lxml')
-#print(soup)
-
-items = soup.find_all("item", limit = 10)
-
-for item in items:
-    title = item.title.text
-    pub_date = item.pubDate.text
-    link = item.link.text
-    print(f"Title: {title}")
-    print(f"Date: {pub_date}")
-    print(f"Link: {link}")
-
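Once the corrected ch1.py has run, the 10items.json file it writes can be spot-checked. A minimal sketch, assuming the script ran in the current directory and wrote the keys shown in the patch:

import json

# Sketch: reload 10items.json and confirm each entry carries the
# three keys ch1.py writes (title, published, link).
with open("10items.json", encoding="utf-8") as f:
    data = json.load(f)

assert len(data) <= 10, "expected at most 10 feed items"
for entry in data:
    assert {"title", "published", "link"} <= entry.keys()
print(f"{len(data)} entries look well-formed")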