diff --git a/10items.json b/10items.json new file mode 100644 index 0000000..73cfdc8 --- /dev/null +++ b/10items.json @@ -0,0 +1,52 @@ +[ + { + "title": "Lawmakers are skeptical of Zuckerberg’s commitment to free speech", + "published": "2025-04-09T18:41:28-04:00", + "link": "https://www.theverge.com/news/646288/congress-zuckerberg-wynn-williams-careless-people" + }, + { + "title": "How to choose which Apple Watch to buy", + "published": "2025-04-09T17:36:13-04:00", + "link": "https://www.theverge.com/23037217/best-apple-watch-series-se-ultra" + }, + { + "title": "Adobe is building AI agents for Photoshop and Premiere Pro", + "published": "2025-04-09T17:10:20-04:00", + "link": "https://www.theverge.com/news/646205/adobe-photoshop-premiere-pro-ai-creative-agent-actions" + }, + { + "title": "Aqara adds support for 50 new Matter device types", + "published": "2025-04-09T17:06:23-04:00", + "link": "https://www.theverge.com/news/646199/aqara-matter-support-50-new-device-types" + }, + { + "title": "Framework’s entry-level modular Laptop 12 starts at £499", + "published": "2025-04-09T16:40:28-04:00", + "link": "https://www.theverge.com/news/646031/framework-laptop-12-price-preorder-uk-eu-ca" + }, + { + "title": "Trump says the future of AI is powered by coal", + "published": "2025-04-09T15:25:16-04:00", + "link": "https://www.theverge.com/energy/646011/trump-says-the-future-of-ai-is-powered-by-coal" + }, + { + "title": "Framework raised prices and then un-raised them an hour later because of Trump", + "published": "2025-04-09T15:09:58-04:00", + "link": "https://www.theverge.com/news/646057/framework-raised-prices-and-then-un-raised-them-an-hour-later-because-of-trump" + }, + { + "title": "Gemini can now turn your Google Docs into podcasts", + "published": "2025-04-09T14:59:27-04:00", + "link": "https://www.theverge.com/news/645986/gemini-can-now-turn-your-google-docs-into-podcasts" + }, + { + "title": "Smart home device manufacturers are bracing for chaos — again", + 
"published": "2025-04-09T14:26:40-04:00", + "link": "https://www.theverge.com/smart-home/645927/smart-home-device-manufacturers-are-bracing-for-chaos-again" + }, + { + "title": "One of our favorite video doorbells is on sale for $80", + "published": "2025-04-09T14:03:22-04:00", + "link": "https://www.theverge.com/tech/645930/one-of-our-favorite-video-doorbells-is-on-sale-for-80" + } +] \ No newline at end of file diff --git a/PR.py b/PR.py new file mode 100644 index 0000000..8bcfe47 --- /dev/null +++ b/PR.py @@ -0,0 +1,30 @@ +S C:\Users\gisel\desktop\challenge_1> git add challenge1.py +warning: in the working copy of 'challenge1.py', LF will be replaced by CRLF the next time Git touches it +PS C:\Users\gisel\desktop\challenge_1> git status +On branch cambios_en_read_me +Changes to be committed: + (use "git restore --staged ..." to unstage) + new file: challenge1.py + +Changes not staged for commit: + (use "git add ..." to update what will be committed) + (use "git restore ..." to discard changes in working directory) + modified: README.md + +PS C:\Users\gisel\desktop\challenge_1> git commit m- "cambios en readme y agregar archivo en nueva branch" +error: pathspec 'm-' did not match any file(s) known to git +error: pathspec 'cambios en readme y agregar archivo en nueva branch' did not match any file(s) known to git +PS C:\Users\gisel\desktop\challenge_1> git commit -m "cambios en readme y archivo nuevo" +[cambios_en_read_me 80695c0] cambios en readme y archivo nuevo + 1 file changed, 20 insertions(+) + create mode 100644 challenge1.py +PS C:\Users\gisel\desktop\challenge_1> git push origin cambios_en_read_me +Enumerating objects: 4, done. +Counting objects: 100% (4/4), done. +Delta compression using up to 16 threads +Compressing objects: 100% (3/3), done. +Writing objects: 100% (3/3), 536 bytes | 268.00 KiB/s, done. 
import requests
import feedparser
import json

# Fetch The Verge RSS feed. Plain string — the original used an f-string
# with no placeholders here.
url = 'https://www.theverge.com/rss/index.xml'
r = requests.get(url, timeout=10)

if r.status_code != 200:
    # Abort on a failed fetch instead of continuing with a bad/empty body
    # (the original only printed the error and kept going).
    raise SystemExit(f"error with status code {r.status_code}")

# Parse the XML we already downloaded; passing the URL to feedparser would
# trigger a second, redundant HTTP request.
feed = feedparser.parse(r.text)

# Los primeros 10 items.
items = feed.entries[:10]

# Build the JSON-serializable records once and reuse them both for the
# console listing and for the file dump (the original iterated twice).
data = [
    {
        "title": item.title,
        "published": item.published,
        "link": item.link,
    }
    for item in items
]

for i, entry in enumerate(data, start=1):
    print(f"{i}. Title: {entry['title']}")
    print(f"Date: {entry['published']}")
    print(f"Link: {entry['link']}")

# Guardar un archivo JSON — ensure_ascii=False keeps accented titles readable.
with open("10items.json", "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=4)
import requests
from bs4 import BeautifulSoup
import time
import json

# Base listing URL; the page number is appended as a real query parameter
# per request. The original hard-coded "?page=0" and concatenated the page
# index onto it, producing "page=00", "page=01", ... instead of 0..5.
base_url = "https://www.kavak.com/mx/seminuevos/coupe"

paginas = 6
coupes = []

for pagina in range(paginas):
    url_salto = f"{base_url}?page={pagina}"
    print(f"Consultando: {url_salto}")

    respuesta = requests.get(url_salto, timeout=10)
    if respuesta.status_code != 200:
        # Skip failed pages instead of scraping an error body; the original
        # only status-checked one throwaway request made before the loop.
        print(f"Error con code {respuesta.status_code}")
        continue

    soup = BeautifulSoup(respuesta.text, "html.parser")

    # NOTE(review): these class names are build-hashed and will break when
    # Kavak redeploys its frontend — confirm before relying on this scraper.
    auto_tag = soup.find_all("h3", class_="card-product_cardProduct__title__RR0CK")
    info_tag = soup.find_all("p", class_="card-product_cardProduct__subtitle__hbN2a")
    precio_tag = soup.find_all("span", class_="amount_uki-amount__large__price__2NvVx")

    for a, info, precio in zip(auto_tag, info_tag, precio_tag):
        auto = a.text.strip()
        partes_info = info.text.strip().split(" • ")
        # Guard against cards whose subtitle lacks the "año • km" format —
        # the original raised IndexError on them.
        año = partes_info[0] if partes_info else ""
        kilometraje = partes_info[1] if len(partes_info) > 1 else ""

        precio_txt = precio.text.strip()
        precio_clean = precio_txt.replace("$", "").replace(",", "").strip()
        try:
            precio_num = int(precio_clean)
        except ValueError:
            # Skip entries whose price is not a plain integer amount
            # (the original crashed the whole run here).
            continue

        coupes.append({
            "modelo": auto,
            "año": año,
            "kilometraje": kilometraje,
            "precio": precio_txt,
            "precio_num": precio_num,
        })

    # Polite delay between page requests — `time` was imported but never
    # used in the original, which hammered the server back-to-back.
    time.sleep(1)

# Cheapest first, using the pre-parsed numeric price.
coupes_ordenados = sorted(coupes, key=lambda x: x["precio_num"])

with open("coupes.json", "w", encoding="utf-8") as f:
    json.dump(coupes_ordenados, f, ensure_ascii=False, indent=4)