fix: disable gpu toggle if no GPU is available #63

Merged · 3 commits · Jul 3, 2023
2 changes: 1 addition & 1 deletion apps/desktop/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@localai/desktop",
   "private": true,
-  "version": "0.5.1",
+  "version": "0.5.2",
   "scripts": {
     "dev:next": "next dev -p 1470",
     "build:next": "next build",
12 changes: 11 additions & 1 deletion apps/desktop/src-tauri/Cargo.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion apps/desktop/src-tauri/Cargo.toml
@@ -15,7 +15,7 @@ tauri-build = { version = "1.4.0", features = [] }
 [dependencies]
 llm = { git = "https://github.com/rustformers/llm", branch = "main", package = "llm", features = [
   "default",
-  "cublas",
+  # "cublas",
 ] }

 # llm = { git = "https://github.com/RedBoxing/llm.git", branch = "hf-tokenizer", package = "llm" }
@@ -34,6 +34,7 @@ tauri = { version = "1.4.0", features = [
"window-show",
] }

convert_case = "0.6.0"
paste = "1.0"
sys-info = "0.9.1"
num_cpus = "1.15.0"
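Commenting out the `cublas` feature removes the CUDA-accelerated build path from the `llm` dependency, so building presumably no longer requires the CUDA toolkit; on Apple Silicon, the GPU path that `check_gpu` reports below relies on Metal instead, per the TODO in `gpu.rs`.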
9 changes: 9 additions & 0 deletions apps/desktop/src-tauri/src/inference/gpu.rs
@@ -0,0 +1,9 @@
+#[tauri::command]
+pub async fn check_gpu() -> Result<bool, String> {
+  // TODO: actually check if Metal is available in the future (?)
+  if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
+    Ok(true)
+  } else {
+    Ok(false)
+  }
+}
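On the frontend, the new command is reachable through the generated `InvokeCommand` enum. A minimal usage sketch (using this PR's typed `invoke` wrapper from `~features/invoke`; top-level `await` assumed for brevity):

```ts
import { InvokeCommand, invoke } from "~features/invoke"

// Resolves to true only on macOS/aarch64 for now, mirroring the cfg! check above.
const hasGpu = await invoke(InvokeCommand.CheckGpu)
```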
1 change: 1 addition & 0 deletions apps/desktop/src-tauri/src/inference/mod.rs
@@ -1,4 +1,5 @@
 pub mod completion;
+pub mod gpu;
 pub mod process;
 pub mod server;
 pub mod server_config;
68 changes: 64 additions & 4 deletions apps/desktop/src-tauri/src/main.rs
@@ -38,8 +38,7 @@ fn main() {

       Ok(())
     })
-    // NOTE: New cmd should be added to src/invoke/_shared.ts
-    // TODO: a middleware to convert this into the ts enum would be nice
+    // NOTE: When adding new commands, make sure to run the generate_ts_cmd_enums test to update the TS enum
     .invoke_handler(tauri::generate_handler![
       config::get_config,
       path::read_directory,
@@ -66,14 +65,75 @@ fn main() {
       model::stats::get_model_stats,
       model::config::get_model_config,
       model::config::set_model_config,
-      model::pool::load_model,
       inference::server::start_server,
       inference::server::stop_server,
       inference::server_config::get_server_config,
       inference::server_config::set_server_config,
+      model::pool::load_model,
-      test::test_model,
+      inference::gpu::check_gpu,
       utils::fs::open_directory,
+      test::test_model,
     ])
     .run(tauri::generate_context!())
     .expect("error while running tauri application");
 }

+#[test]
+/**
+ * Generate the enum for the client side invocation
+ * Based on https://matklad.github.io/2022/03/26/self-modifying-code.html#Minimalist-Solution
+ */
+fn generate_ts_cmd_enums() {
+  use convert_case::{Case, Casing};
+
+  fn split_twice<'a>(
+    text: &'a str,
+    start_marker: &str,
+    end_marker: &str,
+  ) -> Option<(&'a str, &'a str, &'a str)> {
+    let (prefix, rest) = text.split_once(start_marker)?;
+    let (mid, suffix) = rest.split_once(end_marker)?;
+    Some((prefix, mid, suffix))
+  }
+
+  let main_rs_text = std::fs::read_to_string(file!()).unwrap();
+
+  let (_, tauri_cmds, _) = split_twice(
+    &main_rs_text,
+    ".invoke_handler(tauri::generate_handler![\n",
+    "])",
+  )
+  .unwrap();
+
+  let arms = tauri_cmds
+    .lines()
+    .map(|line| {
+      line
+        .trim()
+        .trim_end_matches(',')
+        .split("::")
+        .last()
+        .unwrap()
+    })
+    .enumerate()
+    // filter only non-empty string
+    .filter(|(_, cmd)| !cmd.is_empty())
+    .map(|(_, cmd)| format!(" {} = \"{cmd}\"", cmd.to_case(Case::Pascal)))
+    .collect::<Vec<_>>()
+    .join(",\n");
+
+  let ts_enum_path = std::path::Path::new("../src/features/invoke/_shared.ts");
+
+  let ts_original_text = std::fs::read_to_string(ts_enum_path).unwrap();
+
+  let new_text = {
+    let start_marker = " //#region GENERATED\n";
+    let end_marker = "\n //#endregion\n";
+
+    let (prefix, _, suffix) =
+      split_twice(&ts_original_text, start_marker, end_marker).unwrap();
+    format!("{prefix}{start_marker}{arms}{end_marker}{suffix}")
+  };
+
+  std::fs::write(ts_enum_path, new_text).unwrap();
+}
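Running `cargo test generate_ts_cmd_enums` from `apps/desktop/src-tauri` rewrites the `//#region GENERATED` block of `_shared.ts` in place: each entry of the `generate_handler!` list keeps its snake_case string value and gains a Pascal-cased member name. For example, `inference::gpu::check_gpu` comes out as (illustrative formatting):

```ts
export enum InvokeCommand {
  //#region GENERATED
  CheckGpu = "check_gpu",
  // ...one member per generate_handler! entry, in registration order
  //#endregion
}
```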
16 changes: 15 additions & 1 deletion apps/desktop/src/features/inference-server/server-config.tsx
@@ -4,6 +4,8 @@ import { IntInput } from "@lab/ui/int-input"
 import { Switch } from "@lab/ui/switch"
 import { useState } from "react"

+import { InitState, useInit } from "~features/inference-server/use-init"
+import { InvokeCommand, invoke } from "~features/invoke"
 import { useGlobal } from "~providers/global"

 export const ServerConfig = () => {
@@ -15,6 +17,16 @@
   } = useGlobal()

   const [isLoading, setIsLoading] = useState(false)
+
+  const [hasGpu, setHasGpu] = useState(false)
+  const gpuCheck = useInit(async () => {
+    const _hasGpu = await invoke(InvokeCommand.CheckGpu)
+    if (!_hasGpu) {
+      serverConfig.update({ useGpu: false })
+    }
+    setHasGpu(_hasGpu)
+  })

   return (
     <div className="flex items-center justify-end gap-2">
       {/* <Button
@@ -39,7 +51,9 @@
       />

       <Switch
-        disabled={isStarted}
+        disabled={
+          isStarted || gpuCheck.initState !== InitState.Initialized || !hasGpu
+        }
         className={"data-[state=checked]:border-gold-9"}
         thumbClassName="data-[state=checked]:bg-gold-9"
         title="GPU"
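The switch is now disabled in three cases: the server is already running, the GPU probe has not finished, or the probe reported no GPU. `useInit` is the project's run-once-on-mount hook; for readers outside the repo, here is a rough re-implementation of the contract this code assumes (a hypothetical sketch, not the actual `~features/inference-server/use-init`; only the `Initialized` state name is confirmed by the diff):

```tsx
import { useEffect, useRef, useState } from "react"

export enum InitState {
  Default, // assumed initial state
  Initializing, // assumed in-flight state
  Initialized // referenced by the diff above
}

// Hypothetical minimal version: run `fn` once on mount and expose progress,
// so consumers can gate UI (like the GPU switch) on `initState`.
export function useInit(fn: () => Promise<void>) {
  const [initState, setInitState] = useState(InitState.Default)
  const ran = useRef(false)

  useEffect(() => {
    if (ran.current) return // avoid double-run under React StrictMode
    ran.current = true
    setInitState(InitState.Initializing)
    fn().then(() => setInitState(InitState.Initialized))
  }, [fn])

  return { initState }
}
```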
11 changes: 7 additions & 4 deletions apps/desktop/src/features/invoke/_shared.ts
@@ -3,8 +3,9 @@ export type InvokeIO<Input = Record<string, any>, Output = any> = {
   output: Output
 }

-// This should match up with the list of command in apps/desktop/src-tauri/src/main.rs
+// This enum is generated from the test in apps/desktop/src-tauri/src/main.rs
 export enum InvokeCommand {
+  //#region GENERATED
   GetConfig = "get_config",
   ReadDirectory = "read_directory",
   WriteFile = "write_file",
@@ -30,11 +31,13 @@ export enum InvokeCommand {
   GetModelStats = "get_model_stats",
   GetModelConfig = "get_model_config",
   SetModelConfig = "set_model_config",
-  LoadModel = "load_model",
   StartServer = "start_server",
   StopServer = "stop_server",
   GetServerConfig = "get_server_config",
   SetServerConfig = "set_server_config",
+  LoadModel = "load_model",
-  TestModel = "test_model",
-  OpenDirectory = "open_directory"
+  CheckGpu = "check_gpu",
+  OpenDirectory = "open_directory",
+  TestModel = "test_model"
+  //#endregion
 }
1 change: 1 addition & 0 deletions apps/desktop/src/features/invoke/server.ts
@@ -15,4 +15,5 @@ export type ServerCommandMap = {
     path: string
     config: ServerConfig
   }>
+  [InvokeCommand.CheckGpu]: InvokeIO<never, boolean>
 }
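Because the map types `CheckGpu` as `InvokeIO<never, boolean>`, the wrapper can both infer the return type and reject stray payloads at compile time. A sketch of the payoff at a call site (assuming the wrapper's generics key off `ServerCommandMap`, as the rest of `~features/invoke` suggests):

```ts
import { InvokeCommand, invoke } from "~features/invoke"

const hasGpu = await invoke(InvokeCommand.CheckGpu) // inferred as boolean

// @ts-expect-error — CheckGpu takes no input payload (InvokeIO<never, boolean>)
await invoke(InvokeCommand.CheckGpu, { force: true })
```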