Skip to content

Commit

Permalink
Merge pull request #30 from log10-io/llm-abstraction
Browse files Browse the repository at this point in the history
LLM abstraction
  • Loading branch information
nqn authored Jul 10, 2023
2 parents 5312e05 + 2599a12 commit dfddba7
Show file tree
Hide file tree
Showing 22 changed files with 658 additions and 391 deletions.
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,6 @@ dist
users.db

# Environments
.env
.env

.vscode/
39 changes: 25 additions & 14 deletions examples/agents/biochemist.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,42 @@
import os
from log10.load import log10
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI
from log10.load import log10

load_dotenv()

# Select one of OpenAI or Anthropic models
# model = "noop"
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
maxTurns = 30
max_turns = 30

if 'claude' in model:
llm = None
summary_model = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
summary_model = model
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

# example calls from playground (select 1)
camel_agent(userRole='Poor PhD Student', assistantRole='Experienced Computational Chemist',
taskPrompt='Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2. Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2 in an explicit solvent environment using the CHARMM force field and analyze the conformational changes and hydrogen bonding patterns over time',
model=model, summary_model=summary_model, maxTurns=maxTurns,
module=module, hparams=hparams)
camel_agent(
user_role="Poor PhD Student",
assistant_role="Experienced Computational Chemist",
task_prompt="Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2. Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2 in an explicit solvent environment using the CHARMM force field and analyze the conformational changes and hydrogen bonding patterns over time",
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
)
48 changes: 29 additions & 19 deletions examples/agents/code_optimizer.py
Original file line number Diff line number Diff line change
@@ -1,43 +1,53 @@
import os
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.evals import compile
from log10.agents.camel import camel_agent
from log10.openai import OpenAI
from log10.tools import code_extractor

# Select one of OpenAI or Anthropic models
#model = "gpt-3.5-turbo-16k"
model = "claude-1"
maxTurns = 10
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
# model = "noop"
max_turns = 10

if 'claude' in model:
llm = None
summary_model = None
extraction_model = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
extraction_model = "claude-1-100k"
completion_func = anthropicClient.completion
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
summary_model = model
extraction_model = model
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
extraction_model = "gpt-4"
completion_func = openai.ChatCompletion.create
llm = OpenAI({"model": model})


# example calls from playground (select 1)
user_messages, assistant_messages = camel_agent(userRole='C developer', assistantRole='Cybersecurity expert',
taskPrompt="Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf(\"Enter password: \");\n scanf(\"%s\", password);\n\n if (strcmp(password, \"password\") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf(\"Access granted.\\n\");\n } else {\n printf(\"Access denied.\\n\");\n }\n\n return 0;\n}",
model=model, summary_model=summary_model, maxTurns=maxTurns,
module=module, hparams=hparams)
user_messages, assistant_messages = camel_agent(
user_role="C developer",
assistant_role="Cybersecurity expert",
task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
)

full_response = assistant_messages[-1]['content']
full_response = assistant_messages[-1].content

# Next extract just the C code
code = code_extractor(full_response, "C", completion_func, extraction_model, hparams)
code = code_extractor(full_response, "C", extraction_model, llm=llm)
print(f"Extracted code\n###\n{code}")

# Evaluate if the code compiles
Expand Down
37 changes: 24 additions & 13 deletions examples/agents/coder.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,42 @@
import os
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()

# Select one of OpenAI or Anthropic models
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
maxTurns = 30
# model = "noop"
max_turns = 30

if 'claude' in model:
llm = None
summary_model = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
summary_model = model
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

# example calls from playground (select 1)
camel_agent(userRole='Stock Trader', assistantRole='Python Programmer',
taskPrompt='Develop a trading bot for the stock market',
model=model, summary_model=summary_model, maxTurns=maxTurns,
module=module, hparams=hparams)
camel_agent(
user_role="Stock Trader",
assistant_role="Python Programmer",
task_prompt="Develop a trading bot for the stock market",
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
)
37 changes: 24 additions & 13 deletions examples/agents/cybersecurity_expert.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,42 @@
import os
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()

# Select one of OpenAI or Anthropic models
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
maxTurns = 30
# model = "noop"
max_turns = 30

if 'claude' in model:
llm = None
summary_model = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
summary_model = model
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

# example calls from playground (select 1)
camel_agent(userRole='C developer', assistantRole='Cybersecurity expert',
taskPrompt="Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf(\"Enter password: \");\n scanf(\"%s\", password);\n\n if (strcmp(password, \"password\") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf(\"Access granted.\\n\");\n } else {\n printf(\"Access denied.\\n\");\n }\n\n return 0;\n}",
model=model, summary_model=summary_model, maxTurns=maxTurns,
module=module, hparams=hparams)
camel_agent(
user_role="C developer",
assistant_role="Cybersecurity expert",
task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
)
37 changes: 24 additions & 13 deletions examples/agents/email_generator.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,42 @@
import os
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()

# Select one of OpenAI or Anthropic models
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
maxTurns = 30
# model = "noop"
max_turns = 30

if 'claude' in model:
llm = None
summary_model = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
summary_model = model
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

# example calls from playground (select 1)
camel_agent(userRole='Sales email copyeditor', assistantRole='Sales email copywriter',
taskPrompt='Write a sales email to Pfizer about a new healthcare CRM',
model=model, summary_model=summary_model, maxTurns=maxTurns,
module=module, hparams=hparams)
camel_agent(
user_role="Sales email copyeditor",
assistant_role="Sales email copywriter",
task_prompt="Write a sales email to Pfizer about a new healthcare CRM",
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
)
24 changes: 12 additions & 12 deletions examples/agents/scrape_summarizer.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,27 @@

import os
from log10.agents.scrape_summarizer import scrape_summarizer
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.openai import OpenAI


# Select one of OpenAI or Anthropic models
model = "gpt-3.5-turbo-16k"
# model = "claude-1"
# model = "noop"

if 'claude' in model:
llm = None
if "claude" in model:
import anthropic
log10(anthropic)
anthropicClient = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
module = anthropicClient
hparams = {'max_tokens_to_sample': 1024}
summary_model = "claude-1-100k"
else: # openai
llm = Anthropic({"model": model})
elif model == "noop":
llm = NoopLLM()
else:
import openai
log10(openai)
openai.api_key = os.getenv("OPENAI_API_KEY")
hparams = {}
module = openai
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

url = "https://nytimes.com"
print(scrape_summarizer(url, model, module, hparams))
print(scrape_summarizer(url, llm))
Loading

0 comments on commit dfddba7

Please sign in to comment.