diff --git a/PR.py b/PR.py
new file mode 100644
index 0000000..8bcfe47
--- /dev/null
+++ b/PR.py
@@ -0,0 +1,30 @@
+PS C:\Users\gisel\desktop\challenge_1> git add challenge1.py
+warning: in the working copy of 'challenge1.py', LF will be replaced by CRLF the next time Git touches it
+PS C:\Users\gisel\desktop\challenge_1> git status
+On branch cambios_en_read_me
+Changes to be committed:
+  (use "git restore --staged <file>..." to unstage)
+        new file:   challenge1.py
+
+Changes not staged for commit:
+  (use "git add <file>..." to update what will be committed)
+  (use "git restore <file>..." to discard changes in working directory)
+        modified:   README.md
+
+PS C:\Users\gisel\desktop\challenge_1> git commit m- "cambios en readme y agregar archivo en nueva branch"
+error: pathspec 'm-' did not match any file(s) known to git
+error: pathspec 'cambios en readme y agregar archivo en nueva branch' did not match any file(s) known to git
+PS C:\Users\gisel\desktop\challenge_1> git commit -m "cambios en readme y archivo nuevo"
+[cambios_en_read_me 80695c0] cambios en readme y archivo nuevo
+ 1 file changed, 20 insertions(+)
+ create mode 100644 challenge1.py
+PS C:\Users\gisel\desktop\challenge_1> git push origin cambios_en_read_me
+Enumerating objects: 4, done.
+Counting objects: 100% (4/4), done.
+Delta compression using up to 16 threads
+Compressing objects: 100% (3/3), done.
+Writing objects: 100% (3/3), 536 bytes | 268.00 KiB/s, done.
+Total 3 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)
+To https://github.com/mundo-python/challenge_1
+   22acf23..80695c0  cambios_en_read_me -> cambios_en_read_me
+PS C:\Users\gisel\desktop\challenge_1>
\ No newline at end of file
diff --git a/ch1.py b/ch1.py
new file mode 100644
index 0000000..87ec68c
--- /dev/null
+++ b/ch1.py
@@ -0,0 +1,39 @@
+import requests
+import feedparser
+import json
+
+url = 'https://www.theverge.com/rss/index.xml'
+r = requests.get(url)
+
+if r.status_code != 200:
+    print(f"error with status code {r.status_code}")
+
+# The first 10 items
+
+feed = feedparser.parse(url)
+
+items = feed.entries[:10]
+
+for i, item in enumerate(items, start=1):
+    title = item.title
+    pub_date = item.published
+    link = item.link
+
+    print(f"{i}. Title: {title}")
+    print(f"Date: {pub_date}")
+    print(f"Link: {link}")
+
+# Save the items to a JSON file
+data = []
+for item in items:
+    lista = {
+        "title": item.title,
+        "published": item.published,
+        "link": item.link
+    }
+    data.append(lista)
+
+
+with open("10items.json", "w", encoding="utf-8") as f:
+    json.dump(data, f, ensure_ascii=False, indent=4)
+
diff --git a/challenge1.py b/challenge1.py
deleted file mode 100644
index 4a1fa2a..0000000
--- a/challenge1.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import requests
-from bs4 import BeautifulSoup
-
-url = f'https://www.theverge.com/rss/index.xml'
-r= requests.get(url)
-
-
-soup = BeautifulSoup(r.text, 'lxml')
-#print(soup)
-
-items = soup.find_all("item", limit = 10)
-
-for item in items:
-    title = item.title.text
-    pub_date = item.pubDate.text
-    link = item.link.text
-    print(f"Title: {title}")
-    print(f"Date: {pub_date}")
-    print(f"Link: {link}")
-
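Note on ch1.py: the script fetches the feed twice (once with requests, then again inside feedparser.parse(url)) and assumes every entry exposes title, published, and link. A minimal sketch of a possible follow-up that reuses the downloaded bytes and tolerates missing fields; the 10items.json filename is kept from the PR, and the dict-style .get() access works because feedparser entries are FeedParserDict objects:

import json

import feedparser
import requests

url = 'https://www.theverge.com/rss/index.xml'
r = requests.get(url, timeout=10)
r.raise_for_status()  # stop on HTTP errors instead of printing and continuing

# Parse the bytes we already downloaded rather than fetching the URL again
feed = feedparser.parse(r.content)

data = []
for item in feed.entries[:10]:
    # .get() guards against entries that lack a field
    data.append({
        "title": item.get("title", ""),
        "published": item.get("published", ""),
        "link": item.get("link", ""),
    })

with open("10items.json", "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=4)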