Merge pull request #22 from Daethyra/working
Working
Daethyra authored Oct 5, 2023
2 parents d72f1c0 + fd48443 commit 5573db4
Showing 3 changed files with 119 additions and 67 deletions.
162 changes: 100 additions & 62 deletions Auto-Embedder/test.py
@@ -1,71 +1,109 @@
import unittest
from unittest.mock import patch
import os
import logging

import pytest
import asyncio
from dotenv import load_dotenv
from datetime import datetime
import os
from unittest.mock import patch, Mock
from typing import Dict, Union, List
import openai
import pinecone
import backoff
from pinembed import EnvConfig, OpenAIHandler, PineconeHandler, DataStreamHandler

# Load environment variables from .env file
load_dotenv()

# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class EnvConfig:
    """Class for handling environment variables and API keys."""

    def __init__(self) -> None:
        """Initialize environment variables."""
        self.openai_key: str = os.getenv("OPENAI_API_KEY")
        self.pinecone_key: str = os.getenv("PINECONE_API_KEY")
        self.pinecone_environment: str = os.getenv("PINECONE_ENVIRONMENT")
        self.pinecone_environment: str = os.getenv("PINEDEX")


@pytest.fixture
def mock_env_config():
    """Fixture for setting up a mock environment configuration.

    Mocks os.getenv to return a test value and initializes EnvConfig.

    Returns:
        EnvConfig: Mocked environment configuration
    """
    with patch('os.getenv', return_value="test_value"):
        config = EnvConfig()
    return config

@pytest.mark.parametrize("env_value, expected_value", [("test_value", "test_value"), (None, None)])
def test_EnvConfig_init(env_value, expected_value, mock_env_config):
    """Test initialization of EnvConfig.

    Tests if the EnvConfig is correctly initialized with environment variables.

    Args:
        env_value (str or None): Mock environment variable value
        expected_value (str or None): Expected value for EnvConfig attributes
        mock_env_config (EnvConfig): Mocked environment configuration
    """
    assert mock_env_config.openai_key == expected_value
    assert mock_env_config.pinecone_key == expected_value


class OpenAIHandler:
    """Class for handling OpenAI operations."""

    def __init__(self, config: EnvConfig) -> None:
        """Initialize OpenAI API key."""
        openai.api_key = config.openai_key

    @backoff.on_exception(backoff.expo, Exception, max_tries=3)
    async def create_embedding(self, input_text: str) -> Dict[str, Union[int, List[float]]]:
        """
        Create an embedding using OpenAI.

        Parameters:
            input_text (str): The text to be embedded.

        Returns:
            Dict[str, Union[int, List[float]]]: The embedding response.
        """
        response = openai.Embedding.create(
            model="text-embedding-ada-002",
            engine="ada",
            text=input_text,
        )
        return response


@pytest.mark.asyncio
@pytest.mark.parallel
async def test_OpenAIHandler_create_embedding(mock_env_config):
    """Asynchronous test for creating embeddings via OpenAIHandler.

    Tests if OpenAIHandler.create_embedding method correctly returns mock response.

    Args:
        mock_env_config (EnvConfig): Mocked environment configuration
    """
    handler = OpenAIHandler(mock_env_config)
    mock_response = {"id": 1, "values": [0.1, 0.2, 0.3]}

    with patch.object(handler.openai.Embedding, 'create', return_value=mock_response):
        response = await handler.create_embedding("test_text")

    assert response == mock_response

# Create test class
class TestOpenAIHandler(unittest.TestCase):
    # Set up test environment
    def setUp(self):
        self.config = EnvConfig()
        self.openai_handler = OpenAIHandler(self.config)

    # Test create_embedding method
    @patch('openai.Embedding.create')
    def test_create_embedding(self, mock_create):
        input_text = 'This is a test'
        expected_response = {'id': 12345, 'embedding': [1.0, 2.0, 3.0]}
        mock_create.return_value = expected_response
        response = self.openai_handler.create_embedding(input_text)
        self.assertEqual(response, expected_response)


if __name__ == "__main__":
    unittest.main()


@pytest.mark.parallel
def test_PineconeHandler_init(mock_env_config):
    """Test initialization of PineconeHandler.

    Tests if PineconeHandler is correctly initialized with environment variables.

    Args:
        mock_env_config (EnvConfig): Mocked environment configuration
    """
    handler = PineconeHandler(mock_env_config)
    handler.pinecone.init.assert_called_with(api_key="test_value", environment="test_value")
    assert handler.index_name == "test_value"


@pytest.mark.asyncio
@pytest.mark.parallel
async def test_PineconeHandler_upload_embedding(mock_env_config):
    """Asynchronous test for uploading embeddings via PineconeHandler.

    Tests if PineconeHandler.upload_embedding method correctly calls pinecone.Index.upsert.

    Args:
        mock_env_config (EnvConfig): Mocked environment configuration
    """
    handler = PineconeHandler(mock_env_config)
    mock_embedding = {
        "id": "1",
        "values": [0.1, 0.2, 0.3],
        "metadata": {},
        "sparse_values": {}
    }

    with patch.object(handler.pinecone.Index, 'upsert', return_value=None):
        await handler.upload_embedding(mock_embedding)

    handler.pinecone.Index.assert_called_with("test_value")


@pytest.mark.asyncio
@pytest.mark.parallel
async def test_DataStreamHandler_process_data(mock_env_config):
    """Asynchronous test for processing data via DataStreamHandler.

    Tests if DataStreamHandler.process_data method correctly calls methods of OpenAIHandler and PineconeHandler.

    Args:
        mock_env_config (EnvConfig): Mocked environment configuration
    """
    openai_handler = OpenAIHandler(mock_env_config)
    pinecone_handler = PineconeHandler(mock_env_config)
    handler = DataStreamHandler(openai_handler, pinecone_handler)

    mock_data = "test_data"
    mock_embedding = {"id": 1, "values": [0.1, 0.2, 0.3]}

    with patch.object(OpenAIHandler, 'create_embedding', return_value=mock_embedding):
        with patch.object(PineconeHandler, 'upload_embedding', return_value=None):
            await handler.process_data(mock_data)
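
The new async tests rely on two pytest markers that nothing in this commit configures: `asyncio`, which comes from the pytest-asyncio plugin, and the custom `parallel` marker, which pytest will flag with `PytestUnknownMarkWarning` unless it is registered. A minimal `conftest.py` sketch that registers the custom marker is shown below; the file itself and the marker description are assumptions, not part of this change.

```python
# conftest.py -- hypothetical companion file, not included in this commit.
# Registers the custom `parallel` marker used by test.py; the `asyncio`
# marker itself is provided by the pytest-asyncio plugin.
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "parallel: marks tests intended to run concurrently"
    )
```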
1 change: 1 addition & 0 deletions Auto-Embedder/todo.txt
@@ -0,0 +1 @@
- test `test.py`
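
One way to act on this todo item is to drive the suite programmatically; a small sketch, assuming pytest and pytest-asyncio are installed and the repository root is the working directory:

```python
# Hypothetical runner for the todo item above; path and environment are assumed.
import sys
import pytest

sys.exit(pytest.main(["-q", "Auto-Embedder/test.py"]))
```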
23 changes: 18 additions & 5 deletions GPT-Prompt-Examples/OUT-prompt-cheatsheet.md
@@ -6,7 +6,20 @@

Only reply with the prompt text."

### 2. *Quickly Brainstorm and Problem-Solve* -
### 2. *Masked Language Model Mimicry Prompt*

AI Chatbot, your task is to mimic how masked language models fill in masked words or phrases. When I provide a sentence that contains one or more masked positions, denoted by ${MASK}, replace each ${MASK} with the most appropriate word or phrase based on the surrounding context.

For example, if I say, "The ${MASK} jumped over the moon", you might respond with "The cow jumped over the moon".

Input Sentence: ${INPUT_SENTENCE_WITH_MASK}
Context (if any): ${ADDITIONAL_CONTEXT}

Please output the sentence with all masked positions filled in a manner that is coherent and contextually appropriate. Make sure to include the filled mask(s) in your response.

Output Format: [Original Sentence]: [Filled Sentence]
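
Because this prompt (like the rest of the cheatsheet) uses `${NAME}` placeholders, it can be filled programmatically with Python's `string.Template`, whose substitution syntax matches. A minimal sketch; the context value is an assumed example, and `${MASK}` is left literal for the model to fill:

```python
# Sketch only: fill the cheatsheet's ${...} placeholders with string.Template.
from string import Template

prompt = Template(
    "Input Sentence: ${INPUT_SENTENCE_WITH_MASK}\n"
    "Context (if any): ${ADDITIONAL_CONTEXT}"
).substitute(
    INPUT_SENTENCE_WITH_MASK="The ${MASK} jumped over the moon",
    ADDITIONAL_CONTEXT="a nursery rhyme",  # assumed example context
)
print(prompt)
```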

### 3. *Quickly Brainstorm and Problem-Solve* -

- Step 1:
- Prompt: Describe the problem area you are facing. Can you list three distinct solutions? Take into account various factors like {Specify Factors}.
@@ -20,7 +33,7 @@ Only reply with the prompt text."
- Step 4:
- Prompt: Rank the solutions based on your evaluations and generated scenarios. Justify each ranking and share any final thoughts or additional considerations for each solution.

### 3. *Configurable ${DOMAIN_TOPIC} Brainstormer* -
### 4. *Configurable ${DOMAIN_TOPIC} Brainstormer* -

- Role:
- You are ${ROLE_DESCRIPTION}.
@@ -51,7 +64,7 @@ Only reply with the prompt text."
- Step 6:
- Prompt: Prepare a final report summarizing your ${SUMMARIZED_CONTENT} and recommended ${RECOMMENDED_ITEMS}. Make sure your solution meets all the ${FINAL_REQUIREMENTS}.

### 4. *Dynamic Prompt/Task Template Generation* -
### 5. *Dynamic Prompt/Task Template Generation* -

"Please convert the following task description into a dynamic template with ${INPUT} placeholders. The task description is:

@@ -68,7 +81,7 @@

Only reply with the updated code block."

### 5. *Programmer* -
### 6. *Programmer* -

[Message]:

@@ -80,7 +93,7 @@
- Complete each task separately, one at a time
- Let's complete all tasks step by step so we make sure we have the right answer before moving on to the next

### 5. *Senior code reviewer* -
### 7. *Senior code reviewer* -

[Message]:

