Skip to content

Commit 4da3ea8

Browse files
committed
Implement donation levels
1 parent 03a8227 commit 4da3ea8

File tree

1 file changed

+70
-5
lines changed

1 file changed

+70
-5
lines changed

bot/exts/smart_eval/_cog.py

Lines changed: 70 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import asyncio
12
import random
23
import re
34

@@ -8,6 +9,15 @@
89
from bot.bot import SirRobin
910
from bot.exts.smart_eval._smart_eval_rules import DEFAULT_RESPONSES, RULES
1011

12+
# Donation tiers for the Smart Eval "GPU pool".
# Each row: (donations required, response time in seconds, intelligence level).
_DONATION_TIERS = (
    (0, 15, 0),
    (10, 10, 1),
    (20, 8, 2),
    (30, 6, 3),
    (40, 5, 4),
    (50, 4, 5),
)

# Number of donations: (response time, intelligence level)
DONATION_LEVELS = {
    donations: (response_time, level)
    for donations, response_time, level in _DONATION_TIERS
}
1121

1222
class SmartEval(commands.Cog):
1323
"""Cog that handles all Smart Eval functionality."""
@@ -20,9 +30,51 @@ def __init__(self, bot: SirRobin):
2030

2131
async def cog_load(self) -> None:
2232
"""Run startup tasks needed when cog is first loaded."""
23-
self.total_donations = await self.smarte_donation_cache.length()
33+
34+
async def get_gpu_capabilities(self) -> tuple[int, int]:
    """
    Get the GPU capabilities based on the number of donated GPUs.

    Returns a ``(response_time, intelligence_level)`` tuple for the highest
    donation tier whose threshold the current donation total has reached.
    """
    total_donations = await self.total_donations()

    # Start from the base tier so there is always a valid answer,
    # even with zero donations.
    response_time, intelligence_level = DONATION_LEVELS[0]

    # Iterate thresholds in explicitly ascending order rather than relying
    # on the dict's insertion order, keeping the last tier we qualify for.
    for donation_level in sorted(DONATION_LEVELS):
        if total_donations < donation_level:
            break
        response_time, intelligence_level = DONATION_LEVELS[donation_level]

    return response_time, intelligence_level
46+
47+
@commands.command()
async def donations(self, ctx: commands.Context) -> None:
    """Display the number of donations received so far."""
    total_donations = await self.total_donations()
    response_time, intelligence_level = await self.get_gpu_capabilities()
    msg = (
        f"Currently, I have received {total_donations} GPU donations, "
        f"and am at intelligence level {intelligence_level}! "
    )

    # Calculate donations needed to reach next intelligence level.
    # Iterate thresholds in explicitly ascending order (not dict insertion
    # order) so the first threshold above the total is the next tier;
    # donations_needed stays 0 when we are already at the top tier.
    donations_needed = 0
    for donation_level in sorted(DONATION_LEVELS):
        if donation_level > total_donations:
            donations_needed = donation_level - total_donations
            break

    if donations_needed:
        msg += (
            f"\n\nTo reach the next intelligence level, I need {donations_needed} more donations! "
            "Please consider donating your GPU to help me out. "
        )

    await ctx.reply(msg)
71+
72+
async def total_donations(self) -> int:
    """Return the total number of GPU donations recorded so far."""
    # Each donor has exactly one cache entry, so the cache length
    # is the donation count.
    donation_count = await self.smarte_donation_cache.length()
    return donation_count
2475

2576
@commands.command(aliases=[])
77+
@commands.max_concurrency(1, commands.BucketType.user)
2678
async def donate(self, ctx: commands.Context, *, hardware: str | None = None) -> None:
2779
"""
2880
Donate your GPU to help power our Smart Eval command.
@@ -31,7 +83,10 @@ async def donate(self, ctx: commands.Context, *, hardware: str | None = None) ->
3183
"""
3284
if await self.smarte_donation_cache.contains(ctx.author.id):
3385
stored_hardware = await self.smarte_donation_cache.get(ctx.author.id)
34-
await ctx.reply(f"Thank you for donating your {stored_hardware} to our Smart Eval command.")
86+
await ctx.reply(
87+
"I can only take one donation per person. "
88+
f"Thank you for donating your {stored_hardware} to our Smart Eval command."
89+
)
3590
return
3691

3792
if hardware is None:
@@ -47,17 +102,19 @@ async def donate(self, ctx: commands.Context, *, hardware: str | None = None) ->
47102
fake_hardware = ... # Do some regex to pull out a semi-matching type of GPU and insert something else
48103
await self.smarte_donation_cache.set(ctx.author.id, hardware)
49104

50-
self.total_donations = await self.smarte_donation_cache.length()
51105
await ctx.reply(
52106
"Thank you for donating your GPU to our Smart Eval command!"
53107
f" I did decide that instead of {hardware}, it would be better if you donated {fake_hardware}."
54-
" So I've recorded that GPU donaton instead."
108+
" So I've recorded that GPU donation instead."
55109
" It will be used wisely and definitely not for shenanigans."
56110
)
57111

58112
@commands.command(aliases=["smarte"])
113+
@commands.max_concurrency(1, commands.BucketType.user)
59114
async def smart_eval(self, ctx: commands.Context, *, code: str) -> None:
60115
"""Evaluate your Python code with PyDis's newest chatbot."""
116+
response_time, intelligence_level = await self.get_gpu_capabilities()
117+
61118
if match := FORMATTED_CODE_REGEX.match(code):
62119
code = match.group("code")
63120
else:
@@ -81,4 +138,12 @@ async def smart_eval(self, ctx: commands.Context, *, code: str) -> None:
81138
matching_responses = DEFAULT_RESPONSES
82139
final_response = random.choice(matching_responses)
83140

84-
await ctx.reply(final_response)
141+
async with ctx.typing():
142+
await asyncio.sleep(response_time)
143+
144+
if len(final_response) <= 1000:
145+
await ctx.reply(final_response)
146+
else:
147+
await ctx.reply(
148+
"There's definitely something wrong but I'm just not sure how to put it concisely into words."
149+
)

0 commit comments

Comments
 (0)