316 | 316 | parameters: {
317 | 317 | n: 1,
318 | 318 | model: "gpt-4o-mini",
319 | | - messages: [{content: "Hello World", role: "user"}],
320 | | - temperature: 0.0
321 | | - # max_tokens: 4087
| 319 | + messages: [{content: "Hello World", role: "user"}]
322 | 320 | }
323 | 321 | }
324 | 322 | end

359 | 357 | {
360 | 358 | n: 1,
361 | 359 | model: "text-davinci-003",
362 | | - prompt: "Hello World",
363 | | - temperature: 0.0
364 | | - # max_tokens: 4095
| 360 | + prompt: "Hello World"
365 | 361 | }
366 | 362 | }
367 | 363 | end

375 | 371 | expect(subject.client).to receive(:chat).with({
376 | 372 | parameters: {
377 | 373 | n: 1,
378 | | - # max_tokens: 4087,
379 | 374 | model: "gpt-4o-mini",
380 | | - messages: [{content: "Hello World", role: "user"}],
381 | | - temperature: 0.0
| 375 | + messages: [{content: "Hello World", role: "user"}]
382 | 376 | }
383 | 377 | }).and_return(response)
384 | 378 | subject.complete(prompt: "Hello World")

398 | 392 | parameters: {
399 | 393 | n: 1,
400 | 394 | model: "gpt-3.5-turbo",
401 | | - messages: [{content: "Hello World", role: "user"}],
402 | | - temperature: 0.0 # ,
403 | | - # max_tokens: 4086
| 395 | + messages: [{content: "Hello World", role: "user"}]
404 | 396 | }
405 | 397 | }
406 | 398 | end

409 | 401 | expect(subject.client).to receive(:chat).with({
410 | 402 | parameters: {
411 | 403 | n: 1,
412 | | - # max_tokens: 4087 ,
413 | 404 | model: "gpt-4o-mini",
414 | | - messages: [{content: "Hello World", role: "user"}],
415 | | - temperature: 0.0
| 405 | + messages: [{content: "Hello World", role: "user"}]
416 | 406 | }
417 | 407 | }).and_return(response)
418 | 408 | subject.complete(prompt: "Hello World")

422 | 412 |
423 | 413 | context "with prompt and parameters" do
424 | 414 | let(:parameters) do
425 | | - {parameters: {n: 1, model: "gpt-3.5-turbo", messages: [{content: "Hello World", role: "user"}], temperature: 1.0}} # , max_tokens: 4087}}
| 415 | + {parameters: {n: 1, model: "gpt-3.5-turbo", messages: [{content: "Hello World", role: "user"}]}}
426 | 416 | end
427 | 417 |
428 | 418 | it "returns a completion" do
429 | | - response = subject.complete(prompt: "Hello World", model: "gpt-3.5-turbo", temperature: 1.0)
| 419 | + response = subject.complete(prompt: "Hello World", model: "gpt-3.5-turbo")
430 | 420 |
431 | 421 | expect(response.completion).to eq("The meaning of life is subjective and can vary from person to person.")
432 | 422 | end
433 | 423 | end
434 | 424 |
435 | 425 | context "with failed API call" do
436 | 426 | let(:parameters) do
437 | | - {parameters: {n: 1, model: "gpt-4o-mini", messages: [{content: "Hello World", role: "user"}], temperature: 0.0}} # , max_tokens: 4087}}
| 427 | + {parameters: {n: 1, model: "gpt-4o-mini", messages: [{content: "Hello World", role: "user"}]}}
438 | 428 | end
439 | 429 | let(:response) do
440 | 430 | {"error" => {"code" => 400, "message" => "User location is not supported for the API use.", "type" => "invalid_request_error"}}

470 | 460 | describe "#chat" do
471 | 461 | let(:prompt) { "What is the meaning of life?" }
472 | 462 | let(:model) { "gpt-4o-mini" }
473 | | - let(:temperature) { 0.0 }
474 | 463 | let(:n) { 1 }
475 | 464 | let(:history) { [content: prompt, role: "user"] }
476 | | - let(:parameters) { {parameters: {n: n, messages: history, model: model, temperature: temperature}} } # max_tokens: be_between(4014, 4096)}} }
| 465 | + let(:parameters) { {parameters: {n: n, messages: history, model: model}} }
477 | 466 | let(:answer) { "As an AI language model, I don't have feelings, but I'm functioning well. How can I assist you today?" }
478 | 467 | let(:answer_2) { "Alternative answer" }
479 | 468 | let(:choices) do

585 | 574 | end
586 | 575 |
587 | 576 | context "with options" do
588 | | - let(:temperature) { 0.75 }
589 | 577 | let(:model) { "gpt-3.5-turbo-0301" }
590 | 578 |
591 | 579 | it "sends prompt as message and additional params and returns a response message" do
592 | | - response = subject.complete(prompt: prompt, model: model, temperature: temperature)
| 580 | + response = subject.complete(prompt: prompt, model: model)
593 | 581 |
594 | 582 | expect(response.chat_completion).to eq(answer)
595 | 583 | end

612 | 600 | end
613 | 601 |
614 | 602 | it "returns multiple response messages" do
615 | | - response = subject.chat(messages: [content: prompt, role: "user"], model: model, temperature: temperature, n: 2)
| 603 | + response = subject.chat(messages: [content: prompt, role: "user"], model: model, n: 2)
616 | 604 |
617 | 605 | expect(response.completions).to eq(choices)
618 | 606 | end
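
Net effect of this commit: the specs now assert that the wrapper injects no temperature (previously hard-coded to 0.0) into the request parameters, and the stale commented-out max_tokens expectations are dropped as well. The client receives only what the caller supplies, so OpenAI's server-side default temperature (1.0) applies. A minimal usage sketch of the new behavior, assuming these specs exercise langchainrb's Langchain::LLM::OpenAI wrapper (the class name does not appear in this diff):

    require "langchain"

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

    # Per the updated expectation, this now sends only
    # {n: 1, model: "gpt-4o-mini", messages: [{content: "Hello World", role: "user"}]}.
    llm.complete(prompt: "Hello World")

    # Callers who relied on the old deterministic default must opt in explicitly
    # (assuming the wrapper still forwards extra keyword parameters to the client):
    llm.complete(prompt: "Hello World", temperature: 0.0)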