File Coverage

blib/lib/OpenAI/API/Request/Chat.pm
Criterion Covered Total %
statement 21 34 61.7
branch 0 2 0.0
condition 0 3 0.0
subroutine 7 11 63.6
pod 4 4 100.0
total 32 54 59.2


line stmt bran cond sub pod time code
1             package OpenAI::API::Request::Chat;
2              
3 16     16   115 use strict;
  16         60  
  16         523  
4 16     16   91 use warnings;
  16         37  
  16         501  
5              
6 16     16   90 use Carp qw/croak/;
  16         36  
  16         877  
7              
8 16     16   108 use Moo;
  16         48  
  16         106  
9 16     16   6224 use strictures 2;
  16         137  
  16         707  
10 16     16   3127 use namespace::clean;
  16         46  
  16         183  
11              
12             extends 'OpenAI::API::Request';
13              
14 16     16   4968 use Types::Standard qw(Any Bool Int Num Str Map ArrayRef HashRef);
  16         83  
  16         150  
15              
16             has model => ( is => 'rw', isa => Str, default => 'gpt-3.5-turbo' );
17             has messages => ( is => 'rw', isa => ArrayRef [HashRef], default => sub { [] } );
18              
19             has max_tokens => ( is => 'rw', isa => Int, );
20             has temperature => ( is => 'rw', isa => Num, );
21             has top_p => ( is => 'rw', isa => Num, );
22             has n => ( is => 'rw', isa => Int, );
23             has stream => ( is => 'rw', isa => Bool, );
24             has logprobs => ( is => 'rw', isa => Int, );
25             has echo => ( is => 'rw', isa => Bool, );
26             has stop => ( is => 'rw', isa => Any, );
27             has presence_penalty => ( is => 'rw', isa => Num, );
28             has frequency_penalty => ( is => 'rw', isa => Num, );
29             has logit_bias => ( is => 'rw', isa => Map [ Int, Int ], );
30             has user => ( is => 'rw', isa => Str, );
31              
32 0     0 1   sub endpoint { 'chat/completions' }
33 0     0 1   sub method { 'POST' }
34              
35             sub add_message {
36 0     0 1   my ( $self, $role, $content ) = @_;
37              
38 0 0 0       croak 'add_message() requires two parameters: role and content' if !defined $role || !defined $content;
39              
40 0           push @{ $self->messages }, { role => $role, content => $content };
  0            
41              
42 0           return $self;
43             }
44              
45             sub send_message {
46 0     0 1   my ( $self, $content ) = @_;
47              
48 0           $self->add_message( 'user', $content );
49              
50 0           my $res = $self->send();
51 0           my $assistant_response = $res->{choices}[0]{message}{content};
52              
53 0           $self->add_message( 'assistant', $assistant_response );
54              
55 0           return $res;
56             }
57              
58             1;
59              
60             __END__
61              
62             =head1 NAME
63              
64             OpenAI::API::Request::Chat - chat endpoint
65              
66             =head1 SYNOPSIS
67              
68             use OpenAI::API::Request::Chat;
69              
70             my $chat = OpenAI::API::Request::Chat->new(
71             messages => [
72             { "role" => "system", "content" => "You are a helpful assistant." },
73             ],
74             );
75              
76             my $res = $chat->send_message('Who won the world series in 2020?');
77              
78             =head1 DESCRIPTION
79              
80             Given a chat conversation, the model will return a chat completion
81             response (similar to ChatGPT).
82              
83             =head1 METHODS
84              
85             =head2 new()
86              
87             =over 4
88              
89             =item * model
90              
91             ID of the model to use.
92              
93             See L<Models overview|https://platform.openai.com/docs/models/overview>
94             for a reference of them.
95              
96             =item * messages
97              
98             The messages to generate chat completions for, in the L<chat
99             format|https://platform.openai.com/docs/guides/chat/introduction>.
100              
101             =item * max_tokens [optional]
102              
103             The maximum number of tokens to generate.
104              
105             Most models have a context length of 2048 tokens (except for the newest
106             models, which support 4096).
107              
108             =item * temperature [optional]
109              
110             What sampling temperature to use, between 0 and 2. Higher values like
111             0.8 will make the output more random, while lower values like 0.2 will
112             make it more focused and deterministic.
113              
114             =item * top_p [optional]
115              
116             An alternative to sampling with temperature, called nucleus sampling.
117              
118             We generally recommend altering this or C<temperature> but not both.
119              
120             =item * n [optional]
121              
122             How many completions to generate for each prompt.
123              
124             Use carefully and ensure that you have reasonable settings for
125             C<max_tokens> and C<stop>.
126              
127             =item * stop [optional]
128              
129             Up to 4 sequences where the API will stop generating further tokens. The
130             returned text will not contain the stop sequence.
131              
132             =item * frequency_penalty [optional]
133              
134             Number between -2.0 and 2.0. Positive values penalize new tokens based
135             on their existing frequency in the text so far.
136              
137             =item * presence_penalty [optional]
138              
139             Number between -2.0 and 2.0. Positive values penalize new tokens based
140             on whether they appear in the text so far.
141              
142             =item * user [optional]
143              
144             A unique identifier representing your end-user, which can help OpenAI
145             to monitor and detect abuse.
146              
147             =back
148              
149             =head2 send()
150              
151             Sends the request and returns a data structure similar to the one
152             documented in the API reference.
153              
154             =head2 send_async()
155              
156             Send a request asynchronously. Returns a L<future|IO::Async::Future> that will
157             be resolved with the decoded JSON response. See L<OpenAI::API::Request>
158             for an example.
159              
160             =head2 add_message($role, $content)
161              
162             Appends a message to the list of messages without sending a request.
163              
164             Returns C<$self>, so it can be chained with C<send> or C<send_async>:
165              
166             my $res = OpenAI::API::Request::Chat->new()->add_message( user => 'Hi!' )->send();
167              
168             =head2 send_message($content)
169              
170             Sends a single message as user, appending messages to the conversation
171             automatically. This allows you to treat the C<OpenAI::API::Request::Chat>
172             object as a "chat" instead of a single request:
173              
174             my $chat = OpenAI::API::Request::Chat->new();
175              
176             my $res1 = $chat->send_message("Hello!");
177             print "$res1\n";
178              
179             my $res2 = $chat->send_message("What can you do?");
180             print "$res2\n";
181              
182             =head1 SEE ALSO
183              
184             OpenAI API Reference: L<Chat|https://platform.openai.com/docs/api-reference/chat>