Skip to content
This repository was archived by the owner on May 13, 2024. It is now read-only.

Commit 1932f63

Browse files
author
Chris Lemke
committed
refactor: add skipuniversalaction and create new variable for max_tokens
1 parent d268519 commit 1932f63

File tree

3 files changed

+39
-12
lines changed

3 files changed

+39
-12
lines changed

info.plist

Lines changed: 37 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1027,6 +1027,8 @@
10271027
<integer>0</integer>
10281028
<key>keyword</key>
10291029
<string>cfi</string>
1030+
<key>skipuniversalaction</key>
1031+
<true/>
10301032
<key>subtext</key>
10311033
<string>Using DALL·E 2 to generate images 🎨</string>
10321034
<key>text</key>
@@ -1275,6 +1277,8 @@ sys.stdout.write(sys.argv[2])</string>
12751277
<integer>1</integer>
12761278
<key>scriptfile</key>
12771279
<string></string>
1280+
<key>skipuniversalaction</key>
1281+
<true/>
12781282
<key>subtext</key>
12791283
<string>Talk to ChatGPT 💬</string>
12801284
<key>title</key>
@@ -2215,6 +2219,8 @@ python3 -c 'import caching_manager; caching_manager.combine_user_input_with_quer
22152219
<integer>1</integer>
22162220
<key>scriptfile</key>
22172221
<string></string>
2222+
<key>skipuniversalaction</key>
2223+
<true/>
22182224
<key>subtext</key>
22192225
<string>Use an InstructGPT model 🤖</string>
22202226
<key>title</key>
@@ -3378,22 +3384,22 @@ Please refer to OpenAI's [safety best practices guide](https://platform.openai.c
33783384
<key>config</key>
33793385
<dict>
33803386
<key>default</key>
3381-
<string>0</string>
3387+
<string></string>
33823388
<key>placeholder</key>
3383-
<string>0</string>
3389+
<string></string>
33843390
<key>required</key>
3385-
<true/>
3391+
<false/>
33863392
<key>trim</key>
33873393
<true/>
33883394
</dict>
33893395
<key>description</key>
3390-
<string>The temperature determines how greedy the generative model is (between 0 and 2). If the temperature is high, the model can output words other than the highest probability with a fairly high probability. The generated text will be more diverse, but there is a higher probability for grammar errors and the generation of nonsense.</string>
3396+
<string>The maximum number of tokens to generate by the ChatGPT model. If not set, OpenAI's default value will be used.</string>
33913397
<key>label</key>
3392-
<string>Temperature</string>
3398+
<string>ChatGPT maximum tokens</string>
33933399
<key>type</key>
33943400
<string>textfield</string>
33953401
<key>variable</key>
3396-
<string>temperature</string>
3402+
<string>chat_max_tokens</string>
33973403
</dict>
33983404
<dict>
33993405
<key>config</key>
@@ -3408,13 +3414,34 @@ Please refer to OpenAI's [safety best practices guide](https://platform.openai.c
34083414
<true/>
34093415
</dict>
34103416
<key>description</key>
3411-
<string>The maximum number of tokens to generate by the model. If not set OpenAI's default value will be used.</string>
3417+
<string>The maximum number of tokens to generate by the InstructGPT model. If not set, OpenAI's default value will be used.</string>
34123418
<key>label</key>
3413-
<string>Maximum tokens</string>
3419+
<string>InstructGPT maximum tokens</string>
34143420
<key>type</key>
34153421
<string>textfield</string>
34163422
<key>variable</key>
3417-
<string>max_tokens</string>
3423+
<string>completion_max_tokens</string>
3424+
</dict>
3425+
<dict>
3426+
<key>config</key>
3427+
<dict>
3428+
<key>default</key>
3429+
<string>0</string>
3430+
<key>placeholder</key>
3431+
<string>0</string>
3432+
<key>required</key>
3433+
<true/>
3434+
<key>trim</key>
3435+
<true/>
3436+
</dict>
3437+
<key>description</key>
3438+
<string>The temperature determines how greedy the generative model is (between 0 and 2). If the temperature is high, the model can output words other than the highest probability with a fairly high probability. The generated text will be more diverse, but there is a higher probability for grammar errors and the generation of nonsense.</string>
3439+
<key>label</key>
3440+
<string>Temperature</string>
3441+
<key>type</key>
3442+
<string>textfield</string>
3443+
<key>variable</key>
3444+
<string>temperature</string>
34183445
</dict>
34193446
<dict>
34203447
<key>config</key>
@@ -3653,7 +3680,7 @@ Please refer to OpenAI's [safety best practices guide](https://platform.openai.c
36533680
</dict>
36543681
</array>
36553682
<key>version</key>
3656-
<string>1.3.1</string>
3683+
<string>1.3.2</string>
36573684
<key>webaddress</key>
36583685
<string>https://github.com/chrislemke/ChatFred</string>
36593686
</dict>

workflow/src/text_chat.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
__model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
2626
__history_length = int(os.getenv("history_length") or 4)
2727
__temperature = float(os.getenv("temperature") or 0.0)
28-
__max_tokens = int(os.getenv("max_tokens")) if os.getenv("max_tokens") else None # type: ignore
28+
__max_tokens = int(os.getenv("chat_max_tokens")) if os.getenv("chat_max_tokens") else None # type: ignore
2929
__top_p = int(os.getenv("top_p") or 1)
3030
__frequency_penalty = float(os.getenv("frequency_penalty") or 0.0)
3131
__presence_penalty = float(os.getenv("presence_penalty") or 0.0)

workflow/src/text_completion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
openai.api_key = os.getenv("api_key")
2121
__model = os.getenv("instruct_gpt_model") or "text-davinci-003"
2222
__temperature = float(os.getenv("temperature") or 0.0)
23-
__max_tokens = int(os.getenv("max_tokens") or 50)
23+
__max_tokens = int(os.getenv("completion_max_tokens") or 50)
2424
__top_p = int(os.getenv("top_p") or 1)
2525
__frequency_penalty = float(os.getenv("frequency_penalty") or 0.0)
2626
__presence_penalty = float(os.getenv("presence_penalty") or 0.0)

0 commit comments

Comments
 (0)