Merge pull request #22 from concord-consortium/188650872-describe-graph-by-text

feat: describe graph (PT-188650872)
emcelroy authored Jan 9, 2025
2 parents c547f4a + 9d6cdc1 commit a2e9139
Showing 3 changed files with 134 additions and 88 deletions.
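
In short, the new flow works like this: when the assistant's tool call fetches a dataDisplay resource from CODAP, the model stores the returned exportDataUri and sets a flag; once the run completes, it converts the data URI to an image file, uploads it to OpenAI with purpose "vision", posts it back to the thread as an image_file message, and kicks off a follow-up run that produces the description. A minimal end-to-end sketch of that round trip, stitched together from the calls in this diff (describeGraph, graphId, and the env var name are hypothetical; the CODAP resource string and response shape are illustrative, not taken from this commit):

// Sketch only: the same calls the new code makes, in the order it makes them.
import { codapInterface } from "@concord-consortium/codap-plugin-api";
import { OpenAI } from "openai";
import { convertBase64ToImage } from "./utils/utils";

const openai = new OpenAI({ apiKey: process.env.REACT_APP_OPENAI_API_KEY }); // env var name assumed

async function describeGraph(threadId: string, graphId: string) {
  // 1. Snapshot the graph; CODAP returns the image as a base64 data URI.
  const res: any = await codapInterface.sendRequest({
    action: "get",
    resource: `dataDisplay[${graphId}]`, // hypothetical resource string
  });

  // 2. Turn the data URI into a File and upload it for vision input.
  const file = await convertBase64ToImage(res.values.exportDataUri, "graph.png");
  const uploaded = await openai.files.create({ file, purpose: "vision" });

  // 3. Attach the image to the thread; the follow-up run describes it.
  await openai.beta.threads.messages.create(threadId, {
    role: "user",
    content: [
      { type: "text", text: "This is an image of a graph. Describe it for the user." },
      { type: "image_file", image_file: { file_id: uploaded.id } },
    ],
  });
}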
167 changes: 114 additions & 53 deletions src/models/assistant-model.ts
@@ -1,11 +1,11 @@
 import { types, flow, Instance } from "mobx-state-tree";
+import { OpenAI } from "openai";
 import { Message } from "openai/resources/beta/threads/messages";
 import { codapInterface } from "@concord-consortium/codap-plugin-api";
 import { DAVAI_SPEAKER, DEBUG_SPEAKER } from "../constants";
-import { formatJsonMessage } from "../utils/utils";
+import { convertBase64ToImage, formatJsonMessage } from "../utils/utils";
 import { requestThreadDeletion } from "../utils/openai-utils";
 import { ChatTranscriptModel } from "./chat-transcript-model";
-import { OpenAI } from "openai";
 
 const OpenAIType = types.custom({
   name: "OpenAIType",
@@ -47,6 +47,8 @@ export const AssistantModel = types
   })
   .volatile(() => ({
     isLoadingResponse: false,
+    uploadFileAfterRun: false,
+    dataUri: "",
   }))
   .actions((self) => ({
     handleMessageSubmitMockAssistant() {
@@ -100,6 +102,7 @@
       } catch (err) {
         console.error("Failed to handle message submit:", err);
         self.transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Failed to handle message submit", content: formatJsonMessage(err)});
+        self.isLoadingResponse = false;
       }
     });
 
@@ -125,60 +128,107 @@
         content: formatJsonMessage(runState.status),
       });
 
       const errorStates = ["failed", "cancelled", "incomplete"];
 
       while (runState.status !== "completed" && runState.status !== "requires_action" && !errorStates.includes(runState.status)) {
         yield new Promise((resolve) => setTimeout(resolve, 2000));
         runState = yield self.apiConnection.beta.threads.runs.retrieve(self.thread.id, currentRunId);
         self.transcriptStore.addMessage(DEBUG_SPEAKER, {
           description: "Run state status",
           content: formatJsonMessage(runState.status),
         });
       }
 
       if (runState.status === "requires_action") {
         self.transcriptStore.addMessage(DEBUG_SPEAKER, {
           description: "Run requires action",
           content: formatJsonMessage(runState),
         });
         yield handleRequiredAction(runState, currentRunId);
         yield pollRunState(currentRunId);
       }
 
       if (runState.status === "completed") {
-        const messages = yield self.apiConnection.beta.threads.messages.list(self.thread.id);
-
-        const lastMessageForRun = messages.data
-          .filter((msg: Message) => msg.run_id === currentRunId && msg.role === "assistant")
-          .pop();
-
-        self.transcriptStore.addMessage(DEBUG_SPEAKER, {
-          description: "Run completed, assistant response",
-          content: formatJsonMessage(lastMessageForRun),
-        });
-
-        const lastMessageContent = lastMessageForRun?.content[0]?.text?.value;
-        if (lastMessageContent) {
-          self.transcriptStore.addMessage(DAVAI_SPEAKER, { content: lastMessageContent });
-        } else {
-          self.transcriptStore.addMessage(DAVAI_SPEAKER, {
-            content: "I'm sorry, I don't have a response for that.",
-          });
-        }
-      }
-
-      if (errorStates.includes(runState.status)) {
-        self.transcriptStore.addMessage(DEBUG_SPEAKER, {
-          description: "Run failed",
-          content: formatJsonMessage(runState),
-        });
-        self.transcriptStore.addMessage(DAVAI_SPEAKER, {
-          content: "I'm sorry, I don't have a response for that.",
-        });
-      }
-      self.isLoadingResponse = false;
-    });
+        if (self.uploadFileAfterRun && self.dataUri) {
+          const fileId = yield uploadFile();
+          yield sendFileMessage(fileId);
+          self.uploadFileAfterRun = false;
+          self.dataUri = "";
+          startRun();
+        } else {
+          const messages = yield self.apiConnection.beta.threads.messages.list(self.thread.id);
+
+          const lastMessageForRun = messages.data
+            .filter((msg: Message) => msg.run_id === currentRunId && msg.role === "assistant")
+            .pop();
+
+          self.transcriptStore.addMessage(DEBUG_SPEAKER, {
+            description: "Run completed, assistant response",
+            content: formatJsonMessage(lastMessageForRun),
+          });
+
+          const lastMessageContent = lastMessageForRun?.content[0]?.text?.value;
+          if (lastMessageContent) {
+            self.transcriptStore.addMessage(DAVAI_SPEAKER, { content: lastMessageContent });
+          } else {
+            self.transcriptStore.addMessage(DAVAI_SPEAKER, {
+              content: "I'm sorry, I don't have a response for that.",
+            });
+          }
+          self.isLoadingResponse = false;
+        }
+      }
+
+      if (errorStates.includes(runState.status)) {
+        self.transcriptStore.addMessage(DEBUG_SPEAKER, {
+          description: "Run failed",
+          content: formatJsonMessage(runState),
+        });
+        self.transcriptStore.addMessage(DAVAI_SPEAKER, {
+          content: "I'm sorry, I encountered an error. Please try again.",
+        });
+        self.isLoadingResponse = false;
+      }
+    });
+
+    const uploadFile = flow(function* () {
+      try {
+        const fileFromDataUri = yield convertBase64ToImage(self.dataUri);
+        const uploadedFile = yield self.apiConnection?.files.create({
+          file: fileFromDataUri,
+          purpose: "vision"
+        });
+        return uploadedFile.id;
+      }
+      catch (err) {
+        console.error("Failed to upload image:", err);
+        self.transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Failed to upload image", content: formatJsonMessage(err)});
+      }
+    });
+
+    const sendFileMessage = flow(function* (fileId) {
+      try {
+        const res = yield self.apiConnection.beta.threads.messages.create(self.thread.id, {
+          role: "user",
+          content: [
+            {
+              type: "text",
+              text: "This is an image of a graph. Describe it for the user."
+            },
+            {
+              type: "image_file",
+              image_file: {
+                file_id: fileId
+              }
+            }
+          ]
+        });
+        self.transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Image uploaded", content: formatJsonMessage(res)});
+      } catch (err) {
+        console.error("Failed to send file message:", err);
+        self.transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Failed to send file message", content: formatJsonMessage(err)});
+      }
+    });
 
     const handleRequiredAction = flow(function* (runState, runId) {
       try {
@@ -189,8 +239,18 @@
             const { action, resource, values } = JSON.parse(toolCall.function.arguments);
             const request = { action, resource, values };
             self.transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Request sent to CODAP", content: formatJsonMessage(request) });
-            const res = yield codapInterface.sendRequest(request);
+            let res = yield codapInterface.sendRequest(request);
+            // Prepare to upload an image file after the run if the request fetched a dataDisplay resource.
+            const isImageSnapshotRequest = action === "get" && resource.match(/^dataDisplay/);
+            if (isImageSnapshotRequest) {
+              self.uploadFileAfterRun = true;
+              self.dataUri = res.values.exportDataUri;
+            }
             self.transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Response from CODAP", content: formatJsonMessage(res) });
+            // Remove any exportDataUri value since it can be large and doesn't need to be sent to the assistant.
+            res = isImageSnapshotRequest
+              ? { ...res, values: { ...res.values, exportDataUri: undefined } }
+              : res;
             return { tool_call_id: toolCall.id, output: JSON.stringify(res) };
           } else {
             return { tool_call_id: toolCall.id, output: "Tool call not recognized." };
@@ -208,6 +268,7 @@
       } catch (err) {
         console.error(err);
         self.transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Error taking required action", content: formatJsonMessage(err)});
+        self.isLoadingResponse = false;
       }
     });
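
One design choice worth noting in handleRequiredAction above: the raw CODAP response embeds the entire image as a base64 exportDataUri, which would bloat the tool output that gets JSON-stringified back to the model, so the handler blanks that field and lets the image travel separately as an uploaded file. A self-contained sketch of the pruning step (the response literal is fabricated for illustration):

// Fabricated response object, shaped like the CODAP reply the handler prunes.
type CodapResponse = { success: boolean; values: { exportDataUri?: string; [key: string]: unknown } };

const res: CodapResponse = {
  success: true,
  values: { exportDataUri: "data:image/png;base64,iVBORw0KGgo...", title: "Graph" },
};

// Setting the field to undefined is enough: JSON.stringify drops
// undefined-valued properties, so the large data URI never reaches the model.
const pruned = { ...res, values: { ...res.values, exportDataUri: undefined } };
console.log(JSON.stringify(pruned)); // {"success":true,"values":{"title":"Graph"}}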
35 changes: 0 additions & 35 deletions src/utils/openai-utils.ts
@@ -1,38 +1,3 @@
-import { AssistantTool } from "openai/resources/beta/assistants";
-
-export const openAiTools: AssistantTool[] = [
-  {
-    type: "function",
-    function: {
-      name: "create_request",
-      description: "Create a request to get data from CODAP",
-      strict: false,
-      parameters: {
-        type: "object",
-        properties: {
-          action: {
-            type: "string",
-            description: "The action to perform"
-          },
-          resource: {
-            type: "string",
-            description: "The resource to act upon"
-          },
-          values: {
-            type: "object",
-            description: "The values to pass to the action"
-          }
-        },
-        additionalProperties: false,
-        required: [
-          "action",
-          "resource"
-        ]
-      }
-    }
-  },
-];
-
 export const requestThreadDeletion = async (threadId: string): Promise<Response> => {
   const response = await fetch(`${process.env.REACT_APP_OPENAI_BASE_URL}threads/${threadId}`, {
     method: "DELETE",
20 changes: 20 additions & 0 deletions src/utils/utils.ts
@@ -106,3 +106,23 @@ export const isShortcutPressed = (pressedKeys: Set<string>, shortcutKeys: string
     });
   });
 };
+
+export const convertBase64ToImage = async (base64Data: string, filename = "image.png") => {
+  try {
+    const mimeType = base64Data.match(/data:(.*?);base64/)?.[1] || "image/png";
+    const base64 = base64Data.split(",")[1];
+    const binary = atob(base64);
+    const binaryLength = binary.length;
+    const arrayBuffer = new Uint8Array(binaryLength);
+    for (let i = 0; i < binaryLength; i++) {
+      arrayBuffer[i] = binary.charCodeAt(i);
+    }
+
+    const blob = new Blob([arrayBuffer], { type: mimeType });
+    const file = new File([blob], filename, { type: mimeType });
+    return file;
+  } catch (error) {
+    console.error("Error converting base64 to image:", error);
+    throw error;
+  }
+};
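
For reference, convertBase64ToImage runs browser-side, where atob, Blob, and File are available. A quick usage sketch (the data URI is a truncated placeholder, not a decodable image):

// Hypothetical usage: decode a data URI into a named PNG File ready for upload.
const dataUri = "data:image/png;base64,iVBORw0KGgo..."; // placeholder
const file = await convertBase64ToImage(dataUri, "graph.png");
console.log(file.name, file.type); // "graph.png" "image/png"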
