52 changes: 52 additions & 0 deletions 10items.json
@@ -0,0 +1,52 @@
[
{
"title": "Lawmakers are skeptical of Zuckerberg’s commitment to free speech",
"published": "2025-04-09T18:41:28-04:00",
"link": "https://www.theverge.com/news/646288/congress-zuckerberg-wynn-williams-careless-people"
},
{
"title": "How to choose which Apple Watch to buy",
"published": "2025-04-09T17:36:13-04:00",
"link": "https://www.theverge.com/23037217/best-apple-watch-series-se-ultra"
},
{
"title": "Adobe is building AI agents for Photoshop and Premiere Pro",
"published": "2025-04-09T17:10:20-04:00",
"link": "https://www.theverge.com/news/646205/adobe-photoshop-premiere-pro-ai-creative-agent-actions"
},
{
"title": "Aqara adds support for 50 new Matter device types",
"published": "2025-04-09T17:06:23-04:00",
"link": "https://www.theverge.com/news/646199/aqara-matter-support-50-new-device-types"
},
{
"title": "Framework’s entry-level modular Laptop 12 starts at £499",
"published": "2025-04-09T16:40:28-04:00",
"link": "https://www.theverge.com/news/646031/framework-laptop-12-price-preorder-uk-eu-ca"
},
{
"title": "Trump says the future of AI is powered by coal",
"published": "2025-04-09T15:25:16-04:00",
"link": "https://www.theverge.com/energy/646011/trump-says-the-future-of-ai-is-powered-by-coal"
},
{
"title": "Framework raised prices and then un-raised them an hour later because of Trump",
"published": "2025-04-09T15:09:58-04:00",
"link": "https://www.theverge.com/news/646057/framework-raised-prices-and-then-un-raised-them-an-hour-later-because-of-trump"
},
{
"title": "Gemini can now turn your Google Docs into podcasts",
"published": "2025-04-09T14:59:27-04:00",
"link": "https://www.theverge.com/news/645986/gemini-can-now-turn-your-google-docs-into-podcasts"
},
{
"title": "Smart home device manufacturers are bracing for chaos — again",
"published": "2025-04-09T14:26:40-04:00",
"link": "https://www.theverge.com/smart-home/645927/smart-home-device-manufacturers-are-bracing-for-chaos-again"
},
{
"title": "One of our favorite video doorbells is on sale for $80",
"published": "2025-04-09T14:03:22-04:00",
"link": "https://www.theverge.com/tech/645930/one-of-our-favorite-video-doorbells-is-on-sale-for-80"
}
]
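
The published fields above are ISO-8601 timestamps with a UTC offset, so they can be consumed directly with the standard library. A minimal reader sketch (illustrative only, not part of the PR, and assuming 10items.json sits in the working directory):

import json
from datetime import datetime

with open("10items.json", encoding="utf-8") as f:
    items = json.load(f)

for item in items:
    # fromisoformat understands the "-04:00" offset used in the feed
    published = datetime.fromisoformat(item["published"])
    print(f"{published:%Y-%m-%d %H:%M}  {item['title']}")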
30 changes: 30 additions & 0 deletions PR.py
@@ -0,0 +1,30 @@
PS C:\Users\gisel\desktop\challenge_1> git add challenge1.py
warning: in the working copy of 'challenge1.py', LF will be replaced by CRLF the next time Git touches it
PS C:\Users\gisel\desktop\challenge_1> git status
On branch cambios_en_read_me
Changes to be committed:
  (use "git restore --staged <file>..." to unstage)
        new file:   challenge1.py

Changes not staged for commit:
  (use "git add <file>..." to update what will be committed)
  (use "git restore <file>..." to discard changes in working directory)
        modified:   README.md

PS C:\Users\gisel\desktop\challenge_1> git commit m- "cambios en readme y agregar archivo en nueva branch"
error: pathspec 'm-' did not match any file(s) known to git
error: pathspec 'cambios en readme y agregar archivo en nueva branch' did not match any file(s) known to git
PS C:\Users\gisel\desktop\challenge_1> git commit -m "cambios en readme y archivo nuevo"
[cambios_en_read_me 80695c0] cambios en readme y archivo nuevo
 1 file changed, 20 insertions(+)
 create mode 100644 challenge1.py
PS C:\Users\gisel\desktop\challenge_1> git push origin cambios_en_read_me
Enumerating objects: 4, done.
Counting objects: 100% (4/4), done.
Delta compression using up to 16 threads
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 536 bytes | 268.00 KiB/s, done.
Total 3 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)
To https://github.com/mundo-python/challenge_1
   22acf23..80695c0  cambios_en_read_me -> cambios_en_read_me
PS C:\Users\gisel\desktop\challenge_1>
1 change: 1 addition & 0 deletions README.md
@@ -8,3 +8,4 @@ Linea agregada por Giselle

Cambio 2 again

cambio 3
39 changes: 39 additions & 0 deletions ch1.py
@@ -0,0 +1,39 @@
import requests
import feedparser
import json

url = 'https://www.theverge.com/rss/index.xml'
r = requests.get(url)

if r.status_code != 200:
    print(f"error with status code {r.status_code}")

# the first 10 items

feed = feedparser.parse(url)

items = feed.entries[:10]

for i, item in enumerate(items, start=1):
    title = item.title
    pub_date = item.published
    link = item.link

    print(f"{i}. Title: {title}")
    print(f"Date: {pub_date}")
    print(f"Link: {link}")

# save a JSON file
data = []
for item in items:
    lista = {
        "title": item.title,
        "published": item.published,
        "link": item.link
    }
    data.append(lista)


with open("10items.json", "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=4)
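
Since the feed is already downloaded with requests, feedparser could also parse the response body instead of fetching the URL a second time. A possible variant of the fetch step (a sketch, not what the script above does):

import requests
import feedparser

url = "https://www.theverge.com/rss/index.xml"
r = requests.get(url, timeout=10)
r.raise_for_status()              # fail fast on a non-200 response

feed = feedparser.parse(r.text)   # reuse the body that was just downloaded
items = feed.entries[:10]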

20 changes: 0 additions & 20 deletions challenge1.py

This file was deleted.

Binary file added github cesar .pdf
Binary file not shown.
Binary file added how_to_create_a_branch.pdf
Binary file not shown.
45 changes: 45 additions & 0 deletions kavak.py
@@ -0,0 +1,45 @@
import requests
from bs4 import BeautifulSoup
import json

url = "https://www.kavak.com/mx/seminuevos/coupe?page="

# quick availability check against the first page
response = requests.get(url + "0")
if response.status_code != 200:
    print(f"Error con code {response.status_code}")

paginas = 6
coupes = []

for pagina in range(paginas):
    url_salto = url + str(pagina)
    print(f"Consultando: {url_salto}")

    respuesta = requests.get(url_salto)
    soup = BeautifulSoup(respuesta.text, "html.parser")

    # listing cards: title, subtitle (year • mileage) and price
    auto_tag = soup.find_all("h3", class_="card-product_cardProduct__title__RR0CK")
    info_tag = soup.find_all("p", class_="card-product_cardProduct__subtitle__hbN2a")
    precio_tag = soup.find_all("span", class_="amount_uki-amount__large__price__2NvVx")

    for a, info, precio in zip(auto_tag, info_tag, precio_tag):
        auto = a.text.strip()
        partes_info = info.text.strip().split(" • ")
        año = partes_info[0]
        kilometraje = partes_info[1]
        precio_texto = precio.text.strip()
        precio_clean = precio_texto.replace("$", "").replace(",", "").strip()
        precio_num = int(precio_clean)

        coupes.append({
            "modelo": auto,
            "año": año,
            "kilometraje": kilometraje,
            "precio": precio_texto,
            "precio_num": precio_num
        })

# sort by numeric price, cheapest first
coupes_ordenados = sorted(coupes, key=lambda x: x["precio_num"])

with open("coupes.json", "w", encoding="utf-8") as f:
    json.dump(coupes_ordenados, f, ensure_ascii=False, indent=4)
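
Because every record carries precio_num as an integer and the list is written already sorted by it, the cheapest listings come straight off the front of the file. A small illustrative reader (assuming coupes.json was produced by the script above):

import json

with open("coupes.json", encoding="utf-8") as f:
    coupes = json.load(f)

# the file is sorted by precio_num, so the first entries are the cheapest coupés
for coche in coupes[:5]:
    print(f"{coche['modelo']} ({coche['año']}, {coche['kilometraje']}): {coche['precio']}")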