forked from openai/chatgpt-retrieval-plugin
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathopenai.py
59 lines (44 loc) · 1.78 KB
/
openai.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from typing import List
import openai
from tenacity import retry, wait_random_exponential, stop_after_attempt
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
def get_embeddings(texts: List[str]) -> List[List[float]]:
    """
    Embed a batch of texts with OpenAI's text-embedding-ada-002 model.

    Retries up to 3 times with random exponential backoff on failure.

    Args:
        texts: The list of texts to embed.

    Returns:
        One embedding (a list of floats) per input text, in input order.

    Raises:
        Exception: If the OpenAI API call fails after all retries.
    """
    # Request embeddings for the whole batch in a single API call.
    api_response = openai.Embedding.create(
        input=texts,
        model="text-embedding-ada-002",
    )
    # Collect the embedding vector from each result entry.
    embeddings = []
    for entry in api_response["data"]:  # type: ignore
        embeddings.append(entry["embedding"])
    return embeddings
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
def get_chat_completion(
    messages,
    model="gpt-3.5-turbo",  # use "gpt-4" for better results
):
    """
    Generate a chat completion using OpenAI's chat completion API.

    Retries up to 3 times with random exponential backoff on failure.

    Args:
        messages: The list of messages in the chat history.
        model: The name of the model to use for the completion. Default is
            gpt-3.5-turbo, which is a fast, cheap and versatile model. Use
            gpt-4 for higher quality but slower results.

    Returns:
        A string containing the chat completion, stripped of surrounding
        whitespace.

    Raises:
        Exception: If the OpenAI API call fails after all retries.
    """
    # call the OpenAI chat completion API with the given messages
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
    )
    # Use dict-style access throughout, consistent with get_embeddings;
    # the 0.x SDK response supports both dict and attribute access.
    choices = response["choices"]  # type: ignore
    completion = choices[0]["message"]["content"].strip()
    # NOTE: removed a leftover debug print that echoed the full completion
    # (potentially user-sensitive content) to stdout on every call.
    return completion