Skip to content

Commit 4920720

Browse files
github-actions
authored and committed
Format Python code with psf/black push
1 parent b3024ff commit 4920720

File tree

6 files changed

+44
-17
lines changed

6 files changed

+44
-17
lines changed

cogs/code_interpreter_service_cog.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -455,7 +455,12 @@ async def code_interpreter_chat_command(
455455
)
456456
)
457457

458-
llm = ChatOpenAI(model=model, temperature=temperature, top_p=top_p, openai_api_key=OPENAI_API_KEY)
458+
llm = ChatOpenAI(
459+
model=model,
460+
temperature=temperature,
461+
top_p=top_p,
462+
openai_api_key=OPENAI_API_KEY,
463+
)
459464

460465
max_token_limit = 29000 if "gpt-4" in model else 7500
461466

cogs/commands.py

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -761,7 +761,7 @@ async def load_index(
761761
input_type=discord.SlashCommandOptionType.number,
762762
max_value=1,
763763
min_value=0,
764-
default=0
764+
default=0,
765765
)
766766
@discord.option(
767767
name="top_p",
@@ -770,7 +770,7 @@ async def load_index(
770770
input_type=discord.SlashCommandOptionType.number,
771771
max_value=1,
772772
min_value=0,
773-
default=1
773+
default=1,
774774
)
775775
async def talk(
776776
self,
@@ -1137,7 +1137,7 @@ async def draw_action(self, ctx, message: discord.Message):
11371137
input_type=discord.SlashCommandOptionType.number,
11381138
max_value=1,
11391139
min_value=0,
1140-
default=0
1140+
default=0,
11411141
)
11421142
@discord.option(
11431143
name="top_p",
@@ -1146,7 +1146,7 @@ async def draw_action(self, ctx, message: discord.Message):
11461146
input_type=discord.SlashCommandOptionType.number,
11471147
max_value=1,
11481148
min_value=0,
1149-
default=1
1149+
default=1,
11501150
)
11511151
async def chat_code(
11521152
self,
@@ -1160,7 +1160,9 @@ async def chat_code(
11601160
"Code interpretation is disabled on this server.", ephemeral=True
11611161
)
11621162
return
1163-
await self.code_interpreter_cog.code_interpreter_chat_command(ctx, model=model, temperature=temperature, top_p=top_p)
1163+
await self.code_interpreter_cog.code_interpreter_chat_command(
1164+
ctx, model=model, temperature=temperature, top_p=top_p
1165+
)
11641166

11651167
"""
11661168
Translation commands and actions
@@ -1284,7 +1286,7 @@ async def summarize_action(self, ctx, message: discord.Message):
12841286
input_type=discord.SlashCommandOptionType.number,
12851287
max_value=1,
12861288
min_value=0,
1287-
default=0
1289+
default=0,
12881290
)
12891291
@discord.option(
12901292
name="top_p",
@@ -1293,7 +1295,7 @@ async def summarize_action(self, ctx, message: discord.Message):
12931295
input_type=discord.SlashCommandOptionType.number,
12941296
max_value=1,
12951297
min_value=0,
1296-
default=1
1298+
default=1,
12971299
)
12981300
async def chat(
12991301
self,
@@ -1304,7 +1306,11 @@ async def chat(
13041306
top_p: float = 1,
13051307
):
13061308
await self.search_cog.search_chat_command(
1307-
ctx, search_scope=search_scope, model=model, temperature=temperature, top_p=top_p,
1309+
ctx,
1310+
search_scope=search_scope,
1311+
model=model,
1312+
temperature=temperature,
1313+
top_p=top_p,
13081314
)
13091315

13101316
# Search slash commands
@@ -1371,7 +1377,7 @@ async def search(
13711377
nodes: int = 4,
13721378
deep: bool = False,
13731379
response_mode: str = "refine",
1374-
model: str ="gpt-4-1106-preview",
1380+
model: str = "gpt-4-1106-preview",
13751381
multistep: bool = False,
13761382
):
13771383
await self.search_cog.search_command(

cogs/index_service_cog.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,8 @@ async def on_message(self, message):
152152

153153
prompt += (
154154
"\n{System Message: the user has just uploaded the file "
155-
+ str(file.filename) + "\n"
155+
+ str(file.filename)
156+
+ "\n"
156157
)
157158

158159
# Link operations, allow for user link upload, we connect and download the content at the link.

cogs/search_service_cog.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -419,7 +419,12 @@ async def on_message(self, message):
419419
safe_remove_list(self.thread_awaiting_responses, message.channel.id)
420420

421421
async def search_chat_command(
422-
self, ctx: discord.ApplicationContext, model, search_scope=2, temperature=0, top_p=1,
422+
self,
423+
ctx: discord.ApplicationContext,
424+
model,
425+
search_scope=2,
426+
temperature=0,
427+
top_p=1,
423428
):
424429
await ctx.defer()
425430
embed_title = f"{ctx.user.name}'s internet-connected conversation with GPT"
@@ -477,7 +482,12 @@ async def search_chat_command(
477482
traceback.print_exc()
478483
print("Wolfram tool not added to internet-connected conversation agent.")
479484

480-
llm = ChatOpenAI(model=model, temperature=temperature, top_p=top_p, openai_api_key=OPENAI_API_KEY)
485+
llm = ChatOpenAI(
486+
model=model,
487+
temperature=temperature,
488+
top_p=top_p,
489+
openai_api_key=OPENAI_API_KEY,
490+
)
481491

482492
max_token_limit = 29000 if "gpt-4" in model else 7500
483493

models/index_model.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -511,8 +511,12 @@ async def start_index_chat(self, ctx, model, temperature, top_p):
511511
preparation_message = await ctx.channel.send(
512512
embed=EmbedStatics.get_index_chat_preparation_message()
513513
)
514-
llm = ChatOpenAI(model=model, temperature=temperature, top_p=top_p, max_retries=2)
515-
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=temperature, top_p=top_p, model_name=model))
514+
llm = ChatOpenAI(
515+
model=model, temperature=temperature, top_p=top_p, max_retries=2
516+
)
517+
llm_predictor = LLMPredictor(
518+
llm=ChatOpenAI(temperature=temperature, top_p=top_p, model_name=model)
519+
)
516520

517521
max_token_limit = 29000 if "gpt-4" in model else 7500
518522

models/search_model.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,6 @@ async def search(
228228
os.environ["OPENAI_API_KEY"] = user_api_key
229229
openai.api_key = os.environ["OPENAI_API_KEY"]
230230

231-
232231
# Initialize the search cost
233232
price = 0
234233

@@ -340,7 +339,9 @@ async def search(
340339
embedding_model = OpenAIEmbedding()
341340

342341
if "vision" in model:
343-
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model=model, max_tokens=4096))
342+
llm_predictor = LLMPredictor(
343+
llm=ChatOpenAI(temperature=0, model=model, max_tokens=4096)
344+
)
344345
else:
345346
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model=model))
346347

0 commit comments

Comments (0)