Initialize repo

This commit is contained in:
Kazimierz Ciołek
2026-02-02 13:56:14 +01:00
commit 7ecefb5621
127 changed files with 219019 additions and 0 deletions

7
src-tauri/.npmignore Normal file
View File

@@ -0,0 +1,7 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# Generated by Tauri
# will have schema files for capabilities auto-completion
/gen/schemas

6341
src-tauri/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

28
src-tauri/Cargo.toml Normal file
View File

@@ -0,0 +1,28 @@
[package]
name = "NordicFlow"
version = "0.1.0"
description = "A Tauri App"
authors = ["you"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[build-dependencies]
# Generates the embedded Tauri context (config, icons) at compile time; see build.rs.
tauri-build = { version = "2.0.3", features = [] }

[dependencies]
tauri = { version = "2.7.0", features = [] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# Shell plugin: spawns the bundled llama.cpp / whisper / ffmpeg executables (src/commands.rs).
tauri-plugin-shell = "2"
tauri-plugin-process = "2.3.1"
# SQLite storage for flashcards and stories; migrations defined in src/db.rs.
tauri-plugin-sql = { version = "2", features = ["sqlite"] }
tauri-plugin-http = "2"
tokio = { version = "1", features = ["full"] }
# Used for streaming model downloads in src/commands.rs.
# NOTE(review): reqwest 0.11 pulls in a second HTTP stack alongside tauri-plugin-http;
# consider consolidating on one of the two.
reqwest = { version = "0.11", features = ["stream", "json"] }
futures-util = "0.3"
tauri-plugin-dialog = "2.6.0"

[features]
# This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!!
custom-protocol = ["tauri/custom-protocol"]

3
src-tauri/build.rs Normal file
View File

@@ -0,0 +1,3 @@
// Standard Tauri build script: generates the application context (config,
// icons, capabilities) consumed by `tauri::generate_context!` in src/main.rs.
fn main() {
    tauri_build::build()
}

View File

@@ -0,0 +1,21 @@
{
"identifier": "llama-capability",
"description": "Capability to run local AI server",
"local": true,
"windows": [
"main"
],
"permissions": [
{
"identifier": "shell:allow-execute",
"allow": [
{
"name": "llama-server",
"cmd": ".*llama-server\\.exe",
"sidecar": false,
"args": true
}
]
}
]
}

View File

@@ -0,0 +1,17 @@
{
"identifier": "migrated",
"description": "permissions that were migrated from v1",
"local": true,
"windows": [
"main"
],
"permissions": [
"core:default",
"process:default",
"shell:default",
"sql:default",
"sql:allow-execute",
"sql:allow-select",
"http:default"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"llama-capability":{"identifier":"llama-capability","description":"Capability to run local AI server","local":true,"windows":["main"],"permissions":[{"identifier":"shell:allow-execute","allow":[{"args":true,"cmd":".*llama-server\\.exe","name":"llama-server","sidecar":false}]}]},"migrated":{"identifier":"migrated","description":"permissions that were migrated from v1","local":true,"windows":["main"],"permissions":["core:default","process:default","shell:default","sql:default","sql:allow-execute","sql:allow-select","http:default"]}}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

BIN
src-tauri/icons/128x128.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.8 KiB

BIN
src-tauri/icons/32x32.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 974 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 903 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

BIN
src-tauri/icons/icon.icns Normal file

Binary file not shown.

BIN
src-tauri/icons/icon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

BIN
src-tauri/icons/icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

View File

@@ -0,0 +1,2 @@
@echo off
REM Placeholder standing in for the real llama-server binary; exists only so
REM build/bundle verification passes. Prints a marker line and exits.
echo This is a dummy llama-server for build verification.

637
src-tauri/src/commands.rs Normal file
View File

@@ -0,0 +1,637 @@
use futures_util::StreamExt;
use std::path::{Path, PathBuf};
use tauri::{command, AppHandle, Emitter};
use tauri_plugin_shell::process::CommandEvent;
use tauri_plugin_shell::ShellExt;
use tokio::fs; // Added for tokio::fs::File
use tokio::io::AsyncWriteExt; // Added for AsyncWriteExt
// Chat model weights: Llama 3.1 8B Instruct in GGUF format. Stored under the
// app-local data dir (see get_model_path); the download URL comes from the
// frontend, so swapping models only requires changing this filename + URL.
const MODEL_FILENAME: &str = "llama-3.1-8b-instruct.gguf";
#[command]
pub async fn check_model_exists(app: AppHandle) -> Result<bool, String> {
let model_path = get_model_path(&app)?;
if !model_path.exists() {
return Ok(false);
}
// Check magic bytes
let mut file = std::fs::File::open(&model_path).map_err(|e| e.to_string())?;
let mut buffer = [0u8; 4];
use std::io::Read;
file.read_exact(&mut buffer).map_err(|e| e.to_string())?;
// GGUF magic is "GGUF" (0x47 0x47 0x55 0x46)
if &buffer == b"GGUF" {
Ok(true)
} else {
// Corrupt file, maybe delete it?
// For now just return false so UI prompts download
let _ = std::fs::remove_file(model_path);
Ok(false)
}
}
/// Streams the chat model from `url` to disk, emitting `(downloaded, total)`
/// progress events on "download-progress".
///
/// The body is written to a temporary `.part` file and atomically renamed on
/// completion. This matters because a GGUF file truncated mid-download still
/// begins with the valid "GGUF" magic, so writing directly to the final path
/// would let `check_model_exists` accept a corrupt model after a crash.
/// Buffered bytes are flushed before the rename. `token` is an optional
/// bearer token (e.g. for gated Hugging Face repos).
#[command]
pub async fn download_model(
    app: AppHandle,
    window: tauri::Window,
    url: String,
    token: Option<String>,
) -> Result<String, String> {
    let model_path = get_model_path(&app)?;
    // Create the models directory if it doesn't exist yet.
    if let Some(parent) = model_path.parent() {
        std::fs::create_dir_all(parent).map_err(|e| e.to_string())?;
    }
    let client = reqwest::Client::new();
    let mut request = client
        .get(&url)
        .header("User-Agent", "NordicFlow/1.0 (Desktop App)");
    if let Some(t) = token {
        if !t.trim().is_empty() {
            request = request.header("Authorization", format!("Bearer {}", t));
        }
    }
    let response = request.send().await.map_err(|e| e.to_string())?;
    if !response.status().is_success() {
        return Err(format!("Download failed: Status {}", response.status()));
    }
    // 0 when the server omits Content-Length; the frontend must handle that.
    let total_size = response.content_length().unwrap_or(0);
    // Stream into "<name>.gguf.part" first (same convention as the vision
    // downloader), then rename into place once complete.
    let part_path = model_path.with_extension("gguf.part");
    let mut file = fs::File::create(&part_path)
        .await
        .map_err(|e| e.to_string())?;
    let mut stream = response.bytes_stream();
    let mut downloaded: u64 = 0;
    while let Some(item) = stream.next().await {
        let chunk = item.map_err(|e| e.to_string())?;
        file.write_all(&chunk).await.map_err(|e| e.to_string())?;
        downloaded += chunk.len() as u64;
        let _ = window.emit("download-progress", (downloaded, total_size));
    }
    // Ensure buffered bytes hit the OS before the finished file becomes visible.
    file.flush().await.map_err(|e| e.to_string())?;
    fs::rename(&part_path, &model_path)
        .await
        .map_err(|e| e.to_string())?;
    Ok("Download complete".to_string())
}
/// Best-effort kill of any running llama-server.exe so its port and model
/// memory are freed before a new spawn. Windows-only: on other targets this
/// is a no-op (the rest of this module also hard-codes Windows binary names).
/// Errors from taskkill (e.g. no such process) are deliberately ignored.
fn kill_llama_server() {
    #[cfg(target_os = "windows")]
    let _ = std::process::Command::new("taskkill")
        .args(&["/IM", "llama-server.exe", "/F"])
        .output();
}
/// Launches the bundled llama-server on port 8080 with the chat model.
///
/// Any previously running server is killed first so the port and memory are
/// free. Server stdout/stderr are forwarded to the app console by a
/// background task. Errors when the model has not been downloaded yet.
#[command]
pub async fn start_chat(app: AppHandle) -> Result<String, String> {
    kill_llama_server();
    let model_path = get_model_path(&app)?;
    if !model_path.exists() {
        return Err("Model not found. Please download it first.".to_string());
    }
    let llama_dir = get_llama_dir(&app)?;
    // llama-server.exe ships in the llama directory together with its DLLs.
    let server_path = llama_dir.join("llama-server.exe");
    let server_path_str = server_path.to_string_lossy().to_string();
    let model_path_str = model_path.to_string_lossy().to_string();
    // llama-server.exe -m model.gguf -c 2048 --port 8080 --n-gpu-layers 99
    let cmd = app
        .shell()
        .command(server_path_str)
        .args(&[
            "-m",
            &model_path_str,
            "-c",
            "2048",
            "--port",
            "8080",
            "--n-gpu-layers",
            "99", // Try to use GPU if possible
        ])
        .current_dir(llama_dir); // Important so the server finds its DLLs
    let (mut rx, _child) = cmd.spawn().map_err(|e| e.to_string())?;
    // Forward server output for diagnostics. Lossy conversion fixes the
    // previous code, which Debug-printed a Result (`Ok("...")` / `Err(...)`)
    // instead of the actual log line.
    tauri::async_runtime::spawn(async move {
        while let Some(event) = rx.recv().await {
            match event {
                CommandEvent::Stdout(line) => {
                    println!("LLAMA: {}", String::from_utf8_lossy(&line))
                }
                CommandEvent::Stderr(line) => {
                    eprintln!("LLAMA ERR: {}", String::from_utf8_lossy(&line))
                }
                _ => {}
            }
        }
    });
    Ok("Server started".to_string())
}
/// Directory containing the bundled llama.cpp / whisper / ffmpeg binaries.
/// In dev the process working directory is `src-tauri`, so the binaries are
/// expected at `<cwd>/llama`. The app handle is currently unused.
fn get_llama_dir(_app: &AppHandle) -> Result<PathBuf, String> {
    std::env::current_dir()
        .map(|cwd| cwd.join("llama"))
        .map_err(|e| e.to_string())
}
/// Full path of the chat model file. Models live under the app-local data
/// directory (`<AppData>/<app>/models`) so large writes don't trip the dev
/// file watcher.
fn get_model_path(app: &AppHandle) -> Result<PathBuf, String> {
    use tauri::Manager;
    let base = app.path().app_local_data_dir().map_err(|e| e.to_string())?;
    Ok(base.join("models").join(MODEL_FILENAME))
}
// MiniCPM-V 2.6 vision model weights (GGUF).
const VISION_MODEL_FILENAME: &str = "minicpm-v-2.6.gguf";
// Matching multimodal projector, passed to llama-mtmd-cli via --mmproj.
const VISION_PROJ_FILENAME: &str = "minicpm-v-2.6-mmproj.gguf";
/// Full path of the vision model weights, in the same app-local
/// `models` directory as the chat model.
fn get_vision_model_path(app: &AppHandle) -> Result<PathBuf, String> {
    use tauri::Manager;
    let base = app.path().app_local_data_dir().map_err(|e| e.to_string())?;
    Ok(base.join("models").join(VISION_MODEL_FILENAME))
}
/// Full path of the vision model's multimodal projector file.
fn get_vision_proj_path(app: &AppHandle) -> Result<PathBuf, String> {
    use tauri::Manager;
    let base = app.path().app_local_data_dir().map_err(|e| e.to_string())?;
    Ok(base.join("models").join(VISION_PROJ_FILENAME))
}
/// Returns true when `path` exists and begins with the 4-byte GGUF magic
/// ("GGUF"). Any invalid file — unreadable, shorter than 4 bytes, or with the
/// wrong magic — is deleted (best effort) so the next existence check prompts
/// the UI to re-download; in that case false is returned.
///
/// Takes `&Path` rather than `&PathBuf` (idiomatic; existing `&PathBuf`
/// call sites keep working through deref coercion).
fn validate_gguf_file(path: &Path) -> bool {
    use std::io::Read;
    if path.exists() {
        if let Ok(mut file) = std::fs::File::open(path) {
            let mut magic = [0u8; 4];
            // read_exact fails on files < 4 bytes, which counts as corrupt.
            if file.read_exact(&mut magic).is_ok() && &magic == b"GGUF" {
                return true;
            }
        }
        // Invalid content: remove it so the UI offers a fresh download.
        let _ = std::fs::remove_file(path);
    }
    false
}
/// True only when both the vision weights and the projector are present and
/// valid GGUF files.
#[command]
pub async fn check_vision_model_exists(app: AppHandle) -> Result<bool, String> {
    let weights_ok = validate_gguf_file(&get_vision_model_path(&app)?);
    // Deliberately not short-circuited: validation also deletes a corrupt
    // file, so the projector must be checked even when the weights fail.
    let projector_ok = validate_gguf_file(&get_vision_proj_path(&app)?);
    Ok(weights_ok && projector_ok)
}
/// Downloads the vision model and, when `proj_url` is given, its projector
/// into the app's models directory, emitting combined `(done, total)` progress
/// on "download-vision-progress".
///
/// Sizes are pre-fetched with HEAD requests so a single progress bar can span
/// both files; hard-coded fallbacks (~4.6 GB / ~200 MB) are used when the
/// server omits Content-Length. Each file streams to a `.part` temp file and
/// is renamed into place only once complete. `token` is an optional bearer
/// token (e.g. Hugging Face).
#[command]
pub async fn download_vision_model(
    app: AppHandle,
    window: tauri::Window,
    url: String,
    proj_url: Option<String>,
    token: Option<String>,
) -> Result<String, String> {
    let model_path = get_vision_model_path(&app)?;
    let proj_path = get_vision_proj_path(&app)?;
    // Ensure the models directory exists (proj_path shares the same parent).
    if let Some(parent) = model_path.parent() {
        std::fs::create_dir_all(parent).map_err(|e| e.to_string())?;
    }
    let client = reqwest::Client::new();
    // Shared headers for the HEAD probes and both GET downloads.
    let mut headers = reqwest::header::HeaderMap::new();
    headers.insert(
        "User-Agent",
        "NordicFlow/1.0 (Desktop App)".parse().unwrap(),
    );
    if let Some(ref t) = token {
        if !t.trim().is_empty() {
            headers.insert("Authorization", format!("Bearer {}", t).parse().unwrap());
        }
    }
    /// Streams one file to `dest_path` (via a "<stem>.gguf.part" temp file,
    /// renamed on success) and emits combined progress: `start_offset` shifts
    /// this file's bytes into the shared `total_combined_size` scale so two
    /// sequential downloads fill a single bar. Returns this file's
    /// Content-Length (0 when the server omitted it).
    async fn download_file(
        client: &reqwest::Client,
        url: &str,
        headers: reqwest::header::HeaderMap,
        dest_path: &PathBuf,
        window: &tauri::Window,
        event_name: &str,
        start_offset: u64,
        total_combined_size: u64,
    ) -> Result<u64, String> {
        let response = client
            .get(url)
            .headers(headers)
            .send()
            .await
            .map_err(|e| e.to_string())?;
        if !response.status().is_success() {
            return Err(format!(
                "Download failed for {}: Status {}",
                url,
                response.status()
            ));
        }
        let content_len = response.content_length().unwrap_or(0);
        // Write to a temp file first so an interrupted download never leaves
        // a truncated file at the final destination.
        let part_path = dest_path.with_extension("gguf.part");
        let mut file = fs::File::create(&part_path)
            .await
            .map_err(|e| e.to_string())?;
        let mut stream = response.bytes_stream();
        let mut downloaded_bytes: u64 = 0;
        while let Some(item) = stream.next().await {
            let chunk = item.map_err(|e| e.to_string())?;
            file.write_all(&chunk).await.map_err(|e| e.to_string())?;
            downloaded_bytes += chunk.len() as u64;
            // Progress event ignored on failure (e.g. window already closed).
            let _ = window.emit(
                event_name,
                (start_offset + downloaded_bytes, total_combined_size),
            );
        }
        file.flush().await.map_err(|e| e.to_string())?;
        fs::rename(&part_path, dest_path)
            .await
            .map_err(|e| e.to_string())?;
        Ok(content_len)
    }
    // Probe the model size with a HEAD request so the combined total is known
    // before streaming begins.
    let resp_model = client
        .head(&url)
        .headers(headers.clone())
        .send()
        .await
        .map_err(|e| e.to_string())?;
    let mut size_model = resp_model.content_length().unwrap_or(0);
    if size_model == 0 {
        size_model = 4670 * 1024 * 1024; // ~4.6 GB fallback
    }
    let mut size_proj = 0;
    if let Some(ref p_url) = proj_url {
        let resp_proj = client
            .head(p_url)
            .headers(headers.clone())
            .send()
            .await
            .map_err(|e| e.to_string())?;
        let mut s = resp_proj.content_length().unwrap_or(0);
        if s == 0 {
            s = 200 * 1024 * 1024; // ~200 MB fallback
        }
        size_proj = s;
    }
    let total_size = size_model + size_proj;
    // Download the main model (progress offset 0).
    download_file(
        &client,
        &url,
        headers.clone(),
        &model_path,
        &window,
        "download-vision-progress",
        0,
        total_size,
    )
    .await?;
    // Then the projector, continuing the combined bar from size_model.
    if let Some(ref p_url) = proj_url {
        download_file(
            &client,
            p_url,
            headers.clone(),
            &proj_path,
            &window,
            "download-vision-progress",
            size_model,
            total_size,
        )
        .await?;
    }
    Ok("Download complete".to_string())
}
/// Describes an image with the local vision model via llama-mtmd-cli.
///
/// `image_path` may be a local file path or an http(s) URL; URLs are first
/// downloaded into the app cache and the temp file removed afterwards. The
/// chat server is killed up front to free memory for the vision model.
/// Returns the CLI's stdout on success, or a combined stdout/stderr message
/// on failure.
#[command]
pub async fn analyze_image(app: AppHandle, image_path: String) -> Result<String, String> {
    // Free up memory by killing a potentially running chat server.
    kill_llama_server();
    let model_path = get_vision_model_path(&app)?;
    let proj_path = get_vision_proj_path(&app)?;
    if !model_path.exists() {
        return Err("Vision model not found".to_string());
    }
    if !proj_path.exists() {
        return Err("Vision projector not found".to_string());
    }
    let llama_dir = get_llama_dir(&app)?;
    let cli_path = llama_dir.join("llama-mtmd-cli.exe");
    let mut actual_image_path = PathBuf::from(&image_path);
    let mut is_temp = false;
    // Remote image: fetch it into the cache so the CLI gets a local file.
    if image_path.starts_with("http") {
        let client = reqwest::Client::new();
        let response = client
            .get(&image_path)
            .header("User-Agent", "NordicFlow/1.0 (Desktop App)")
            .send()
            .await
            .map_err(|e| format!("Failed to fetch image: {}", e))?;
        if !response.status().is_success() {
            return Err(format!("Image download failed: {}", response.status()));
        }
        use tauri::Manager;
        let temp_dir = app
            .path()
            .app_cache_dir()
            .map_err(|e| e.to_string())?
            .join("temp_images");
        fs::create_dir_all(&temp_dir)
            .await
            .map_err(|e| e.to_string())?;
        // Keep the server-side filename (and extension) when one is present;
        // fall back to a generic name otherwise.
        let filename = response
            .url()
            .path_segments()
            .and_then(|segments| segments.last())
            .and_then(|name| if name.is_empty() { None } else { Some(name) })
            .unwrap_or("temp_image.jpg");
        let temp_path = temp_dir.join(filename);
        let content = response.bytes().await.map_err(|e| e.to_string())?;
        let mut file = fs::File::create(&temp_path)
            .await
            .map_err(|e| e.to_string())?;
        file.write_all(&content).await.map_err(|e| e.to_string())?;
        actual_image_path = temp_path;
        is_temp = true;
    }
    // llama-mtmd-cli -m model.gguf --mmproj proj.gguf --image img -p "..."
    // Low sampling temperature (0.1) for a more literal description.
    let output = app
        .shell()
        .command(cli_path.to_string_lossy().to_string())
        .args(&[
            "-m",
            &model_path.to_string_lossy().to_string(),
            "--mmproj",
            &proj_path.to_string_lossy().to_string(),
            "--image",
            &actual_image_path.to_string_lossy().to_string(),
            "-p",
            "Describe this image in detail.",
            "--temp",
            "0.1",
        ])
        // Run from the llama directory so the CLI finds its DLLs.
        .current_dir(&llama_dir)
        .output()
        .await
        .map_err(|e| e.to_string())?;
    // Best-effort cleanup of the downloaded temp image.
    if is_temp {
        let _ = fs::remove_file(&actual_image_path).await;
    }
    if output.status.success() {
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        Ok(stdout)
    } else {
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        Err(format!(
            "Analysis failed. Stdout: {}. Stderr: {}",
            stdout, stderr
        ))
    }
}
// NB-Whisper medium (Norwegian speech-to-text), quantized q5_0, GGUF format.
const WHISPER_MODEL_FILENAME: &str = "nb-whisper-medium-q5_0.gguf";

/// Full path of the whisper model, in the shared app-local `models` directory.
fn get_whisper_model_path(app: &AppHandle) -> Result<PathBuf, String> {
    use tauri::Manager;
    let base = app.path().app_local_data_dir().map_err(|e| e.to_string())?;
    Ok(base.join("models").join(WHISPER_MODEL_FILENAME))
}
/// True when both binaries transcription needs — whisper-cli.exe and
/// ffmpeg.exe — are present next to the bundled llama binaries.
#[command]
pub async fn check_whisper_dependencies(app: AppHandle) -> Result<bool, String> {
    let dir = get_llama_dir(&app)?;
    let all_present = ["whisper-cli.exe", "ffmpeg.exe"]
        .iter()
        .all(|exe| dir.join(exe).exists());
    Ok(all_present)
}
/// Whether a valid whisper model is on disk. `validate_gguf_file` also
/// removes a corrupt model so the UI re-prompts for download.
#[command]
pub async fn check_whisper_model_exists(app: AppHandle) -> Result<bool, String> {
    get_whisper_model_path(&app).map(|path| validate_gguf_file(&path))
}
/// Streams the whisper model from `url` to disk, emitting `(downloaded, total)`
/// progress on "download-whisper-progress".
///
/// Like the other downloaders, the body is written to a `.part` temp file,
/// flushed, and atomically renamed on completion — a truncated GGUF still
/// carries the valid magic and would otherwise pass `validate_gguf_file`
/// after a crash. `token` is an optional bearer token.
#[command]
pub async fn download_whisper_model(
    app: AppHandle,
    window: tauri::Window,
    url: String, // Allow overriding URL, but default in frontend
    token: Option<String>,
) -> Result<String, String> {
    let model_path = get_whisper_model_path(&app)?;
    // Ensure the models directory exists.
    if let Some(parent) = model_path.parent() {
        std::fs::create_dir_all(parent).map_err(|e| e.to_string())?;
    }
    let client = reqwest::Client::new();
    let mut request = client.get(&url).header("User-Agent", "NordicFlow/1.0");
    if let Some(t) = token {
        if !t.trim().is_empty() {
            request = request.header("Authorization", format!("Bearer {}", t));
        }
    }
    let response = request.send().await.map_err(|e| e.to_string())?;
    if !response.status().is_success() {
        return Err(format!("Download failed: {}", response.status()));
    }
    let total_size = response.content_length().unwrap_or(0);
    // Stream into "<name>.gguf.part", rename on success (see doc above).
    let part_path = model_path.with_extension("gguf.part");
    let mut file = fs::File::create(&part_path)
        .await
        .map_err(|e| e.to_string())?;
    let mut stream = response.bytes_stream();
    let mut downloaded: u64 = 0;
    while let Some(item) = stream.next().await {
        let chunk = item.map_err(|e| e.to_string())?;
        file.write_all(&chunk).await.map_err(|e| e.to_string())?;
        downloaded += chunk.len() as u64;
        let _ = window.emit("download-whisper-progress", (downloaded, total_size));
    }
    // Flush buffered bytes before the finished file becomes visible.
    file.flush().await.map_err(|e| e.to_string())?;
    fs::rename(&part_path, &model_path)
        .await
        .map_err(|e| e.to_string())?;
    Ok("Download complete".to_string())
}
/// Transcribes an audio/video file and returns whisper-cli's JSON transcript
/// (which includes timestamps).
///
/// Pipeline: (1) ffmpeg converts the input to 16 kHz mono PCM WAV — the input
/// format whisper.cpp expects; (2) whisper-cli transcribes it with the
/// Norwegian model and writes a `.json` sidecar; (3) the JSON is read back
/// and the temp files removed (best effort).
#[command]
pub async fn transcribe_media(app: AppHandle, video_path: String) -> Result<String, String> {
    let llama_dir = get_llama_dir(&app)?;
    let ffmpeg_path = llama_dir.join("ffmpeg.exe");
    let whisper_cli_path = llama_dir.join("whisper-cli.exe");
    let model_path = get_whisper_model_path(&app)?;
    if !ffmpeg_path.exists() || !whisper_cli_path.exists() {
        return Err("Missing ffmpeg or whisper-cli".to_string());
    }
    if !model_path.exists() {
        return Err("Whisper model not found".to_string());
    }
    // 1. Convert to WAV (16 kHz, mono) inside the app cache.
    use tauri::Manager;
    let temp_dir = app
        .path()
        .app_cache_dir()
        .map_err(|e| e.to_string())?
        .join("temp_audio");
    fs::create_dir_all(&temp_dir)
        .await
        .map_err(|e| e.to_string())?;
    let output_wav = temp_dir.join("output.wav");
    // ffmpeg -i input -ar 16000 -ac 1 -c:a pcm_s16le output.wav -y
    let ffmpeg_out = app
        .shell()
        .command(ffmpeg_path.to_string_lossy().to_string())
        .args(&[
            "-i",
            &video_path,
            "-ar",
            "16000", // 16 kHz sample rate expected by whisper
            "-ac",
            "1", // mono
            "-c:a",
            "pcm_s16le",
            output_wav.to_string_lossy().to_string().as_str(),
            "-y", // overwrite a leftover output.wav without prompting
        ])
        .output()
        .await
        .map_err(|e| e.to_string())?;
    if !ffmpeg_out.status.success() {
        let stderr = String::from_utf8_lossy(&ffmpeg_out.stderr).to_string();
        return Err(format!("FFmpeg failed: {}", stderr));
    }
    // 2. Transcribe. -oj emits JSON with timestamps; -nt suppresses the text
    //    dump on stdout so the process logs stay clean.
    let whisper_out = app
        .shell()
        .command(whisper_cli_path.to_string_lossy().to_string())
        .args(&[
            "-m",
            &model_path.to_string_lossy().to_string(),
            "-f",
            &output_wav.to_string_lossy().to_string(),
            "-l",
            "no", // Norwegian
            "-oj", // Output JSON
            "-nt", // No text output to stdout (cleaner logs)
        ])
        // Run from the llama dir so the CLI can locate its DLLs.
        .current_dir(&llama_dir)
        .output()
        .await
        .map_err(|e| e.to_string())?;
    if !whisper_out.status.success() {
        let stderr = String::from_utf8_lossy(&whisper_out.stderr).to_string();
        return Err(format!("Whisper failed: {}", stderr));
    }
    // 3. Read the JSON sidecar. NOTE(review): this assumes whisper-cli appends
    //    ".json" to the input name ("output.wav" -> "output.wav.json") —
    //    confirm against the bundled build.
    let json_path = output_wav.with_extension("wav.json");
    if !json_path.exists() {
        return Err(format!("JSON output not found at {:?}", json_path));
    }
    let json_content = fs::read_to_string(&json_path)
        .await
        .map_err(|e| e.to_string())?;
    // Cleanup temp artifacts (best effort).
    let _ = fs::remove_file(&output_wav).await;
    let _ = fs::remove_file(&json_path).await;
    Ok(json_content)
}

47
src-tauri/src/db.rs Normal file
View File

@@ -0,0 +1,47 @@
use tauri_plugin_sql::{Migration, MigrationKind};
/// All SQLite migrations for the app database, in version order.
/// Registered against "sqlite:flashcards.db" in main.rs.
pub fn get_migrations() -> Vec<Migration> {
    // v1: flashcard sets and their cards (cards cascade-delete with the set).
    let flashcards = Migration {
        version: 1,
        description: "create_flashcards_tables",
        sql: "
            CREATE TABLE IF NOT EXISTS flashcard_sets (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                topic TEXT NOT NULL,
                color TEXT NOT NULL,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP
            );
            CREATE TABLE IF NOT EXISTS flashcards (
                id TEXT PRIMARY KEY,
                set_id TEXT NOT NULL,
                front TEXT NOT NULL,
                back TEXT NOT NULL,
                status TEXT NOT NULL DEFAULT 'new',
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (set_id) REFERENCES flashcard_sets(id) ON DELETE CASCADE
            );
        ",
        kind: MigrationKind::Up,
    };
    // v2: generated reading stories with optional metadata and vocab JSON.
    let smart_stories = Migration {
        version: 2,
        description: "create_smart_stories_table",
        sql: "
            CREATE TABLE IF NOT EXISTS smart_stories (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                content TEXT NOT NULL,
                difficulty TEXT,
                topic TEXT,
                vocab_json TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP
            );
        ",
        kind: MigrationKind::Up,
    };
    vec![flashcards, smart_stories]
}

39
src-tauri/src/main.rs Normal file
View File

@@ -0,0 +1,39 @@
mod commands;
mod db;
mod models;
// Learn more about Tauri commands at https://tauri.app/v1/guides/features/command
/// Demo command left over from the Tauri template; invoked from the frontend.
#[tauri::command]
fn greet(name: &str) -> String {
    format!("Hello, {name}! You've been greeted from Rust!")
}
/// Application entry point: registers plugins, the SQLite migrations, and
/// every invokable command, then runs the Tauri event loop.
fn main() {
    let migrations = db::get_migrations();
    tauri::Builder::default()
        // Shell: spawns the bundled llama.cpp / whisper / ffmpeg binaries.
        .plugin(tauri_plugin_shell::init())
        .plugin(tauri_plugin_process::init())
        .plugin(tauri_plugin_http::init())
        .plugin(tauri_plugin_dialog::init())
        // SQLite-backed storage; migrations create the flashcards/stories tables.
        .plugin(
            tauri_plugin_sql::Builder::default()
                .add_migrations("sqlite:flashcards.db", migrations)
                .build(),
        )
        // Commands callable from the frontend via invoke().
        .invoke_handler(tauri::generate_handler![
            greet,
            commands::start_chat,
            commands::check_model_exists,
            commands::download_model,
            commands::check_vision_model_exists,
            commands::download_vision_model,
            commands::analyze_image,
            commands::check_whisper_dependencies,
            commands::check_whisper_model_exists,
            commands::download_whisper_model,
            commands::transcribe_media
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

32
src-tauri/src/models.rs Normal file
View File

@@ -0,0 +1,32 @@
use serde::{Deserialize, Serialize};
/// A named collection of flashcards; mirrors the `flashcard_sets` table
/// (see src/db.rs). Cards are stored separately in `flashcards` and fetched
/// on demand; `FlashcardSetWithCards` is the embedded-cards shape the
/// frontend uses.
#[derive(Debug, Serialize, Deserialize)]
pub struct FlashcardSet {
    pub id: String,
    pub title: String,
    pub topic: String,
    pub color: String,
    /// Filled by SQLite (`DEFAULT CURRENT_TIMESTAMP`); `None` before the row
    /// is persisted.
    pub created_at: Option<String>,
}
/// One card in a set; mirrors the `flashcards` table, whose rows
/// cascade-delete with their parent set (see src/db.rs).
#[derive(Debug, Serialize, Deserialize)]
pub struct Flashcard {
    pub id: String,
    /// Foreign key into `flashcard_sets.id`.
    pub set_id: String,
    pub front: String,
    pub back: String,
    /// Learning state; the schema defaults new rows to 'new'.
    pub status: String,
    /// Filled by SQLite (`DEFAULT CURRENT_TIMESTAMP`); `None` before insert.
    pub created_at: Option<String>,
}
/// A set with its cards embedded — the shape the frontend consumes directly.
/// Note it carries no `created_at`, unlike `FlashcardSet`.
#[derive(Debug, Serialize, Deserialize)]
pub struct FlashcardSetWithCards {
    pub id: String,
    pub title: String,
    pub topic: String,
    pub color: String,
    pub cards: Vec<Flashcard>,
}

40
src-tauri/tauri.conf.json Normal file
View File

@@ -0,0 +1,40 @@
{
"build": {
"beforeDevCommand": "npm run dev",
"beforeBuildCommand": "npm run build",
"frontendDist": "../dist",
"devUrl": "http://localhost:1420"
},
"bundle": {
"active": true,
"targets": "all",
"icon": [
"icons/32x32.png",
"icons/128x128.png",
"icons/128x128@2x.png",
"icons/icon.icns",
"icons/icon.ico"
]
},
"productName": "NordicFlow",
"version": "0.1.0",
"identifier": "pl.kamici.nordicflow",
"plugins": {
"process": {
"active": true
}
},
"app": {
"security": {
"csp": null
},
"windows": [
{
"title": "NordicFlow",
"width": 1000,
"height": 600,
"dragDropEnabled": false
}
]
}
}