diff --git a/Cargo.lock b/Cargo.lock index bf50ab7..19eb58f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2842,6 +2842,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fastrand" version = "1.9.0" @@ -3324,6 +3336,15 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hashlink" version = "0.10.0" @@ -6306,6 +6327,20 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" +[[package]] +name = "rusqlite" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" +dependencies = [ + "bitflags 2.10.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink 0.9.1", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rust_decimal" version = "1.40.0" @@ -7302,6 +7337,16 @@ dependencies = [ "der 0.7.10", ] +[[package]] +name = "sqlparser" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf9c7ff146298ffda83a200f8d5084f08dcee1edfc135fcc1d646a45d50ffd6" +dependencies = [ + "log", + "serde", +] + 
[[package]] name = "sqlx" version = "0.8.6" @@ -7336,7 +7381,7 @@ dependencies = [ "futures-io", "futures-util", "hashbrown 0.15.5", - "hashlink", + "hashlink 0.10.0", "indexmap 2.13.0", "log", "memchr", @@ -8627,15 +8672,18 @@ dependencies = [ "libp2p", "multihash-derive 0.9.1", "pin-project", + "rusqlite", "sea-orm", "sea-orm-migration", "serde", "serde_ipld_dagcbor 0.3.0", "serde_json", + "sqlparser", "thiserror 2.0.18", "time", "tinycloud-lib", "tokio", + "tracing", "ucan-capabilities-object", ] diff --git a/Cargo.toml b/Cargo.toml index 5c275ab..bb8d642 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,9 @@ async-trait = "0.1" ipld-core = { version = "0.4", features = ["serde"] } serde_ipld_dagcbor = "0.6" ucan-capabilities-object = { git = "https://github.com/tinycloudlabs/ucan-capabilities-object" } +rusqlite = { version = "0.32", features = ["bundled", "column_decltype", "hooks", "backup"] } +sqlparser = { version = "0.44", features = ["serde"] } +tracing = "0.1" # Internal crate dependencies tinycloud-lib = { path = "tinycloud-lib", version = "1.0.0" } diff --git a/src/auth_guards.rs b/src/auth_guards.rs index 4ebf943..0bef4f3 100644 --- a/src/auth_guards.rs +++ b/src/auth_guards.rs @@ -105,6 +105,11 @@ where .map_err(|_| Status::InternalServerError)?, ) .respond_to(request), + InvocationOutcome::SqlResult(json) => Json(json).respond_to(request), + InvocationOutcome::SqlExport(data) => Response::build() + .header(ContentType::new("application", "x-sqlite3")) + .sized_body(data.len(), std::io::Cursor::new(data)) + .ok(), } } } diff --git a/src/config.rs b/src/config.rs index e478d62..b54552f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -83,6 +83,33 @@ pub struct SpacesConfig { pub allowlist: Option, } +#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] +pub struct SqlStorageConfig { + #[serde(default = "default_sql_path")] + pub path: String, + pub limit: Option, + #[serde(default = "default_sql_memory_threshold")] + pub 
memory_threshold: ByteUnit, +} + +fn default_sql_path() -> String { + "./tinycloud/sql".to_string() +} + +fn default_sql_memory_threshold() -> ByteUnit { + ByteUnit::Mebibyte(10) +} + +impl Default for SqlStorageConfig { + fn default() -> Self { + Self { + path: default_sql_path(), + limit: None, + memory_threshold: default_sql_memory_threshold(), + } + } +} + #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)] pub struct Storage { @@ -95,6 +122,8 @@ pub struct Storage { #[serde(default = "memory_db")] pub database: String, pub limit: Option, + #[serde(default)] + pub sql: SqlStorageConfig, } impl Default for Storage { @@ -104,6 +133,7 @@ impl Default for Storage { staging: StagingStorage::default().into(), database: memory_db(), limit: None, + sql: SqlStorageConfig::default(), } } } diff --git a/src/lib.rs b/src/lib.rs index 52bf941..aa81528 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -26,6 +26,7 @@ use storage::{ use tinycloud_core::{ keys::{SecretsSetup, StaticSecret}, sea_orm::{ConnectOptions, Database, DatabaseConnection}, + sql::SqlService, storage::{either::Either, memory::MemoryStaging, StorageConfig}, SpaceDatabase, }; @@ -93,6 +94,11 @@ pub async fn app(config: &Figment) -> Result> { ) .await?; + let sql_service = SqlService::new( + tinycloud_config.storage.sql.path.clone(), + tinycloud_config.storage.sql.memory_threshold.as_u64(), + ); + let rocket = rocket::custom(config) .mount("/", routes) .attach(AdHoc::config::()) @@ -100,6 +106,7 @@ pub async fn app(config: &Figment) -> Result> { header_name: tinycloud_config.log.tracing.traceheader, }) .manage(tinycloud) + .manage(sql_service) .manage(tinycloud_config.storage.staging.open().await?); if tinycloud_config.cors { diff --git a/src/routes/mod.rs b/src/routes/mod.rs index bc64943..50cb7e8 100644 --- a/src/routes/mod.rs +++ b/src/routes/mod.rs @@ -2,6 +2,7 @@ use anyhow::Result; use rocket::{data::ToByteUnit, http::Status, serde::json::Json, State}; use serde::Serialize; use 
std::collections::HashMap; +use tokio::io::AsyncReadExt; use tokio_util::compat::TokioAsyncReadCompatExt; use tracing::{info_span, Instrument}; @@ -14,10 +15,11 @@ use crate::{ }; use tinycloud_core::{ sea_orm::DbErr, + sql::{SqlCaveats, SqlError, SqlRequest, SqlService}, storage::{ImmutableReadStore, ImmutableStaging}, types::Resource, util::{DelegationInfo, InvocationInfo}, - TxError, TxStoreError, + InvocationOutcome, TxError, TxStoreError, }; pub mod util; @@ -35,7 +37,7 @@ pub fn version() -> Json { Json(VersionInfo { protocol: tinycloud_lib::protocol::PROTOCOL_VERSION, version: env!("CARGO_PKG_VERSION").to_string(), - features: vec!["kv", "delegation", "sharing"], + features: vec!["kv", "delegation", "sharing", "sql"], }) } @@ -116,6 +118,7 @@ pub async fn delegate( } #[post("/invoke", data = "")] +#[allow(clippy::too_many_arguments)] pub async fn invoke( i: AuthHeaderGetter, req_span: TracingSpan, @@ -124,6 +127,7 @@ pub async fn invoke( staging: &State, tinycloud: &State, config: &State, + sql_service: &State, ) -> Result::Readable>, (Status, String)> { let action_label = "invocation"; let span = info_span!(parent: &req_span.0, "invoke", action = %action_label); @@ -133,6 +137,32 @@ pub async fn invoke( .with_label_values(&["invoke"]) .start_timer(); + // Check for SQL capabilities + let sql_caps: Vec<_> = i + .0 + .0 + .capabilities + .iter() + .filter_map(|c| match (&c.resource, c.ability.as_ref().as_ref()) { + (Resource::TinyCloud(r), ability) + if r.service().as_str() == "sql" && ability.starts_with("tinycloud.sql/") => + { + Some(( + r.space().clone(), + r.path().map(|p| p.to_string()), + ability.to_string(), + )) + } + _ => None, + }) + .collect(); + + if !sql_caps.is_empty() { + let result = handle_sql_invoke(i, data, tinycloud, sql_service, &sql_caps).await; + timer.observe_duration(); + return result; + } + let mut put_iter = i.0 .0.capabilities.iter().filter_map(|c| { match (&c.resource, c.ability.as_ref().as_ref()) { (Resource::TinyCloud(r), 
"tinycloud.kv/put") @@ -229,3 +259,100 @@ pub async fn invoke( .instrument(span) .await } + +async fn handle_sql_invoke( + i: AuthHeaderGetter, + data: DataIn<'_>, + tinycloud: &State, + sql_service: &State, + sql_caps: &[(tinycloud_lib::resource::SpaceId, Option, String)], +) -> Result::Readable>, (Status, String)> { + // Extract caveats from the invocation facts before consuming i + let caveats: Option = + i.0 .0 + .invocation + .payload() + .facts + .as_ref() + .and_then(|facts| { + facts.iter().find_map(|fact| { + fact.as_object() + .and_then(|obj| obj.get("sqlCaveats")) + .and_then(|v| serde_json::from_value(v.clone()).ok()) + }) + }); + + // Verify authorization by invoking with empty inputs + // SQL capabilities don't match KV patterns, so invoke just verifies auth + tinycloud + .invoke::(i.0, HashMap::new()) + .await + .map_err(|e| { + ( + match e { + TxStoreError::Tx(TxError::SpaceNotFound) => Status::NotFound, + TxStoreError::Tx(TxError::Db(DbErr::ConnectionAcquire(_))) => { + Status::InternalServerError + } + _ => Status::Unauthorized, + }, + e.to_string(), + ) + })?; + + // Read the request body as JSON + let body_str = match data { + DataIn::One(d) => { + let mut buf = Vec::new(); + let mut reader = d.open(1u8.megabytes()); + reader + .read_to_end(&mut buf) + .await + .map_err(|e| (Status::BadRequest, e.to_string()))?; + String::from_utf8(buf).map_err(|e| (Status::BadRequest, e.to_string()))? 
+ } + _ => { + return Err((Status::BadRequest, "Expected JSON body".to_string())); + } + }; + + let (space, path, ability) = &sql_caps[0]; + let db_name = SqlService::db_name_from_path(path.as_deref()); + + let sql_request: SqlRequest = + serde_json::from_str(&body_str).map_err(|e| (Status::BadRequest, e.to_string()))?; + + // Handle export specially + if matches!(sql_request, SqlRequest::Export) { + let data = sql_service + .export(space, &db_name) + .await + .map_err(|e| (sql_error_to_status(&e), e.to_string()))?; + return Ok(DataOut::One(InvOut(InvocationOutcome::SqlExport(data)))); + } + + let response = sql_service + .execute(space, &db_name, sql_request, caveats, ability.clone()) + .await + .map_err(|e| (sql_error_to_status(&e), e.to_string()))?; + + let json = + serde_json::to_value(response).map_err(|e| (Status::InternalServerError, e.to_string()))?; + + Ok(DataOut::One(InvOut(InvocationOutcome::SqlResult(json)))) +} + +fn sql_error_to_status(err: &SqlError) -> Status { + match err { + SqlError::Sqlite(_) => Status::BadRequest, + SqlError::PermissionDenied(_) => Status::Forbidden, + SqlError::DatabaseNotFound => Status::NotFound, + SqlError::ResponseTooLarge(_) => Status::new(413), + SqlError::QuotaExceeded => Status::new(429), + SqlError::InvalidStatement(_) => Status::BadRequest, + SqlError::SchemaError(_) => Status::BadRequest, + SqlError::ReadOnlyViolation => Status::Forbidden, + SqlError::ParseError(_) => Status::BadRequest, + SqlError::Internal(_) => Status::InternalServerError, + } +} diff --git a/tinycloud-core/Cargo.toml b/tinycloud-core/Cargo.toml index 5dbce73..d1bfc94 100644 --- a/tinycloud-core/Cargo.toml +++ b/tinycloud-core/Cargo.toml @@ -29,6 +29,10 @@ serde_json.workspace = true serde_ipld_dagcbor = "0.3" ucan-capabilities-object.workspace = true multihash-derive = "0.9" +rusqlite.workspace = true +sqlparser.workspace = true +tracing.workspace = true +tokio.workspace = true [dev-dependencies] sea-orm = { version = "1.1", features = 
["runtime-tokio-rustls", "sqlx-sqlite"] } diff --git a/tinycloud-core/src/db.rs b/tinycloud-core/src/db.rs index c289f54..7a3641a 100644 --- a/tinycloud-core/src/db.rs +++ b/tinycloud-core/src/db.rs @@ -373,6 +373,8 @@ pub enum InvocationOutcome { OpenSessions(HashMap), /// Ordered delegation chain from leaf to root DelegationChain(Vec), + SqlResult(serde_json::Value), + SqlExport(Vec), } impl From for TxError { diff --git a/tinycloud-core/src/lib.rs b/tinycloud-core/src/lib.rs index b0aeade..a0f15dc 100644 --- a/tinycloud-core/src/lib.rs +++ b/tinycloud-core/src/lib.rs @@ -6,6 +6,7 @@ pub mod manifest; pub mod migrations; pub mod models; pub mod relationships; +pub mod sql; pub mod storage; pub mod types; pub mod util; diff --git a/tinycloud-core/src/migrations/m20260218_sql_database.rs b/tinycloud-core/src/migrations/m20260218_sql_database.rs new file mode 100644 index 0000000..bca6f3d --- /dev/null +++ b/tinycloud-core/src/migrations/m20260218_sql_database.rs @@ -0,0 +1,58 @@ +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[derive(Iden)] +enum SqlDatabase { + Table, + Space, + Name, + CreatedAt, + SizeBytes, + StorageMode, +} + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .create_table( + Table::create() + .table(SqlDatabase::Table) + .if_not_exists() + .col(ColumnDef::new(SqlDatabase::Space).string().not_null()) + .col(ColumnDef::new(SqlDatabase::Name).string().not_null()) + .col( + ColumnDef::new(SqlDatabase::CreatedAt) + .timestamp_with_time_zone() + .not_null(), + ) + .col( + ColumnDef::new(SqlDatabase::SizeBytes) + .big_integer() + .not_null() + .default(0), + ) + .col( + ColumnDef::new(SqlDatabase::StorageMode) + .string() + .not_null() + .default("memory"), + ) + .primary_key( + Index::create() + .col(SqlDatabase::Space) + .col(SqlDatabase::Name), + ) + .to_owned(), + ) + .await + } + + async fn down(&self, 
manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_table(Table::drop().table(SqlDatabase::Table).to_owned()) + .await + } +} diff --git a/tinycloud-core/src/migrations/mod.rs b/tinycloud-core/src/migrations/mod.rs index 6911491..a9dd749 100644 --- a/tinycloud-core/src/migrations/mod.rs +++ b/tinycloud-core/src/migrations/mod.rs @@ -1,11 +1,15 @@ use sea_orm_migration::prelude::*; pub mod m20230510_101010_init_tables; +pub mod m20260218_sql_database; pub struct Migrator; #[async_trait::async_trait] impl MigratorTrait for Migrator { fn migrations() -> Vec> { - vec![Box::new(m20230510_101010_init_tables::Migration)] + vec![ + Box::new(m20230510_101010_init_tables::Migration), + Box::new(m20260218_sql_database::Migration), + ] } } diff --git a/tinycloud-core/src/sql/authorizer.rs b/tinycloud-core/src/sql/authorizer.rs new file mode 100644 index 0000000..e27fc47 --- /dev/null +++ b/tinycloud-core/src/sql/authorizer.rs @@ -0,0 +1,234 @@ +use rusqlite::hooks::{AuthAction, AuthContext, Authorization}; + +use super::caveats::SqlCaveats; + +pub fn create_authorizer( + caveats: Option, + ability: String, + is_admin: bool, +) -> impl FnMut(AuthContext<'_>) -> Authorization { + move |ctx: AuthContext<'_>| match ctx.action { + // Always deny attach/detach + AuthAction::Attach { .. } | AuthAction::Detach { .. } => Authorization::Deny, + + // Pragma whitelist + AuthAction::Pragma { pragma_name, .. 
} => { + let readonly_pragmas = [ + "table_info", + "table_list", + "table_xinfo", + "database_list", + "index_list", + "index_info", + "foreign_key_list", + ]; + if readonly_pragmas.contains(&pragma_name) || is_admin { + Authorization::Allow + } else { + Authorization::Deny + } + } + + // Function whitelist + AuthAction::Function { function_name } => { + let allowed_functions = [ + // Standard SQL + "abs", + "changes", + "char", + "coalesce", + "glob", + "hex", + "ifnull", + "iif", + "instr", + "last_insert_rowid", + "length", + "like", + "likely", + "lower", + "ltrim", + "max", + "min", + "nullif", + "printf", + "quote", + "random", + "randomblob", + "replace", + "round", + "rtrim", + "sign", + "soundex", + "substr", + "substring", + "total_changes", + "trim", + "typeof", + "unicode", + "unlikely", + "upper", + "zeroblob", + // Aggregate + "avg", + "count", + "group_concat", + "sum", + "total", + // Date/time + "date", + "time", + "datetime", + "julianday", + "strftime", + "unixepoch", + "timediff", + // JSON + "json", + "json_array", + "json_array_length", + "json_extract", + "json_insert", + "json_object", + "json_patch", + "json_remove", + "json_replace", + "json_set", + "json_type", + "json_valid", + "json_quote", + "json_group_array", + "json_group_object", + "json_each", + "json_tree", + // Math + "acos", + "acosh", + "asin", + "asinh", + "atan", + "atan2", + "atanh", + "ceil", + "ceiling", + "cos", + "cosh", + "degrees", + "exp", + "floor", + "ln", + "log", + "log10", + "log2", + "mod", + "pi", + "pow", + "power", + "radians", + "sin", + "sinh", + "sqrt", + "tan", + "tanh", + "trunc", + ]; + if allowed_functions.contains(&function_name) { + Authorization::Allow + } else { + Authorization::Deny + } + } + + // Read operations: check table/column caveats + AuthAction::Read { + table_name, + column_name, + } => { + if let Some(ref caveats) = caveats { + if !caveats.is_table_allowed(table_name) { + return Authorization::Deny; + } + if 
!caveats.is_column_allowed(column_name) { + return Authorization::Deny; + } + } + Authorization::Allow + } + + // Write operations + AuthAction::Insert { table_name } | AuthAction::Delete { table_name } => { + if matches!( + ability.as_str(), + "tinycloud.sql/read" | "tinycloud.sql/select" + ) { + return Authorization::Deny; + } + if let Some(ref caveats) = caveats { + if !caveats.is_write_allowed() { + return Authorization::Deny; + } + if !caveats.is_table_allowed(table_name) { + return Authorization::Deny; + } + } + Authorization::Allow + } + + AuthAction::Update { + table_name, + column_name, + } => { + if matches!( + ability.as_str(), + "tinycloud.sql/read" | "tinycloud.sql/select" + ) { + return Authorization::Deny; + } + if let Some(ref caveats) = caveats { + if !caveats.is_write_allowed() { + return Authorization::Deny; + } + if !caveats.is_table_allowed(table_name) { + return Authorization::Deny; + } + if !caveats.is_column_allowed(column_name) { + return Authorization::Deny; + } + } + Authorization::Allow + } + + // DDL operations + AuthAction::CreateTable { .. } + | AuthAction::CreateTempTable { .. } + | AuthAction::DropTable { .. } + | AuthAction::DropTempTable { .. } + | AuthAction::AlterTable { .. } + | AuthAction::CreateIndex { .. } + | AuthAction::DropIndex { .. } + | AuthAction::CreateTrigger { .. } + | AuthAction::DropTrigger { .. } + | AuthAction::CreateView { .. } + | AuthAction::DropView { .. } + | AuthAction::CreateTempIndex { .. } + | AuthAction::DropTempIndex { .. } + | AuthAction::CreateTempTrigger { .. } + | AuthAction::DropTempTrigger { .. } + | AuthAction::CreateTempView { .. } + | AuthAction::DropTempView { .. } => { + if !is_admin && !matches!(ability.as_str(), "tinycloud.sql/write" | "tinycloud.sql/*") { + Authorization::Deny + } else { + Authorization::Allow + } + } + + // Allow internal operations + AuthAction::Transaction { .. } | AuthAction::Savepoint { .. 
} | AuthAction::Select => { + Authorization::Allow + } + + // Deny everything else + _ => Authorization::Deny, + } +} diff --git a/tinycloud-core/src/sql/caveats.rs b/tinycloud-core/src/sql/caveats.rs new file mode 100644 index 0000000..55c73c9 --- /dev/null +++ b/tinycloud-core/src/sql/caveats.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct SqlCaveats { + pub tables: Option>, + pub columns: Option>, + pub statements: Option>, + pub read_only: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PreparedStatement { + pub name: String, + pub sql: String, +} + +impl SqlCaveats { + pub fn from_caveats(caveats: &BTreeMap) -> Option { + serde_json::to_value(caveats) + .ok() + .and_then(|v| serde_json::from_value(v).ok()) + } + + pub fn is_table_allowed(&self, table: &str) -> bool { + match &self.tables { + None => true, + Some(tables) => tables.iter().any(|t| t == table), + } + } + + pub fn is_column_allowed(&self, column: &str) -> bool { + match &self.columns { + None => true, + Some(columns) => columns.iter().any(|c| c == column), + } + } + + pub fn is_write_allowed(&self) -> bool { + !self.read_only.unwrap_or(false) + } + + pub fn find_statement(&self, name: &str) -> Option<&PreparedStatement> { + self.statements.as_ref()?.iter().find(|s| s.name == name) + } +} diff --git a/tinycloud-core/src/sql/database.rs b/tinycloud-core/src/sql/database.rs new file mode 100644 index 0000000..dbffff3 --- /dev/null +++ b/tinycloud-core/src/sql/database.rs @@ -0,0 +1,314 @@ +use std::path::PathBuf; + +use rusqlite::hooks::{AuthContext, Authorization}; +use tokio::sync::{mpsc, oneshot}; + +use super::{ + authorizer, + caveats::SqlCaveats, + parser, + storage::{self, StorageMode}, + types::*, +}; + +const MAX_RESPONSE_SIZE: usize = 10 * 1024 * 1024; // 10MB +const IDLE_TIMEOUT: std::time::Duration = 
std::time::Duration::from_secs(300); // 5 min + +struct DbMessage { + request: SqlRequest, + caveats: Option, + ability: String, + response_tx: oneshot::Sender>, +} + +#[derive(Clone)] +pub struct DatabaseHandle { + tx: mpsc::Sender, +} + +impl DatabaseHandle { + pub async fn execute( + &self, + request: SqlRequest, + caveats: Option, + ability: String, + ) -> Result { + let (response_tx, response_rx) = oneshot::channel(); + self.tx + .send(DbMessage { + request, + caveats, + ability, + response_tx, + }) + .await + .map_err(|_| SqlError::Internal("Database actor not available".to_string()))?; + response_rx + .await + .map_err(|_| SqlError::Internal("Database actor dropped response".to_string()))? + } +} + +pub fn spawn_actor( + space_id: String, + db_name: String, + base_path: String, + memory_threshold: u64, +) -> DatabaseHandle { + let (tx, mut rx) = mpsc::channel::(32); + + tokio::task::spawn_blocking(move || { + let rt = tokio::runtime::Handle::current(); + let file_path = PathBuf::from(&base_path) + .join(&space_id) + .join(format!("{}.db", db_name)); + + // Check if file already exists -- if so, open from file + let mut mode = if file_path.exists() { + StorageMode::File(file_path.clone()) + } else { + StorageMode::InMemory + }; + let mut conn = storage::open_connection(&mode).expect("Failed to open database"); + + loop { + // Block on receiving with timeout + let msg = + match rt.block_on(async { tokio::time::timeout(IDLE_TIMEOUT, rx.recv()).await }) { + Ok(Some(msg)) => msg, + Ok(None) => break, // Channel closed + Err(_) => break, // Idle timeout + }; + + let result = handle_message(&conn, &msg.request, &msg.caveats, &msg.ability); + + // Post-write promotion check + if result.is_ok() && matches!(mode, StorageMode::InMemory) { + if let Ok(size) = storage::database_size(&conn) { + if size > memory_threshold { + match storage::promote_to_file(&conn, &file_path) { + Ok(new_conn) => { + conn = new_conn; + mode = StorageMode::File(file_path.clone()); + 
tracing::info!(space=%space_id, db=%db_name, "Promoted database to file storage"); + } + Err(e) => { + tracing::error!(space=%space_id, db=%db_name, error=%e, "Failed to promote database to file"); + } + } + } + } + } + + let _ = msg.response_tx.send(result); + } + + tracing::debug!(space=%space_id, db=%db_name, "Database actor shutting down"); + }); + + DatabaseHandle { tx } +} + +fn handle_message( + conn: &rusqlite::Connection, + request: &SqlRequest, + caveats: &Option, + ability: &str, +) -> Result { + let is_admin = matches!(ability, "tinycloud.sql/admin" | "tinycloud.sql/*"); + + match request { + SqlRequest::Query { sql, params } => { + parser::validate_sql(sql, caveats, ability)?; + + let auth = + authorizer::create_authorizer(caveats.clone(), ability.to_string(), is_admin); + conn.authorizer(Some(auth)); + + let result = execute_query(conn, sql, params); + + conn.authorizer(None::) -> Authorization>); + + result.map(SqlResponse::Query) + } + SqlRequest::Execute { + sql, + params, + schema, + } => { + // Schema init + if let Some(schema_stmts) = schema { + for stmt_sql in schema_stmts { + parser::validate_sql(stmt_sql, caveats, ability)?; + let auth = authorizer::create_authorizer( + caveats.clone(), + ability.to_string(), + is_admin, + ); + conn.authorizer(Some(auth)); + conn.execute_batch(stmt_sql) + .map_err(|e| SqlError::SchemaError(e.to_string()))?; + conn.authorizer(None::) -> Authorization>); + } + } + + parser::validate_sql(sql, caveats, ability)?; + let auth = + authorizer::create_authorizer(caveats.clone(), ability.to_string(), is_admin); + conn.authorizer(Some(auth)); + + let result = execute_statement(conn, sql, params); + + conn.authorizer(None::) -> Authorization>); + + result.map(SqlResponse::Execute) + } + SqlRequest::Batch { statements } => { + for stmt in statements { + parser::validate_sql(&stmt.sql, caveats, ability)?; + } + + let auth = + authorizer::create_authorizer(caveats.clone(), ability.to_string(), is_admin); + 
conn.authorizer(Some(auth)); + + let mut results = Vec::new(); + for stmt in statements { + match execute_statement(conn, &stmt.sql, &stmt.params) { + Ok(result) => results.push(result), + Err(e) => { + conn.authorizer(None::) -> Authorization>); + return Err(e); + } + } + } + + conn.authorizer(None::) -> Authorization>); + + Ok(SqlResponse::Batch(BatchResponse { results })) + } + SqlRequest::ExecuteStatement { name, params } => { + let caveats_ref = caveats + .as_ref() + .ok_or_else(|| SqlError::InvalidStatement("No caveats found".to_string()))?; + let prepared = caveats_ref.find_statement(name).ok_or_else(|| { + SqlError::InvalidStatement(format!("Statement '{}' not found", name)) + })?; + + parser::validate_sql(&prepared.sql, caveats, ability)?; + + let auth = + authorizer::create_authorizer(caveats.clone(), ability.to_string(), is_admin); + conn.authorizer(Some(auth)); + + let result = if prepared + .sql + .trim_start() + .to_uppercase() + .starts_with("SELECT") + { + execute_query(conn, &prepared.sql, params).map(SqlResponse::Query) + } else { + execute_statement(conn, &prepared.sql, params).map(SqlResponse::Execute) + }; + + conn.authorizer(None::) -> Authorization>); + + result + } + SqlRequest::Export => Err(SqlError::Internal( + "Export should be handled by service".to_string(), + )), + } +} + +fn sql_value_to_rusqlite(v: &SqlValue) -> rusqlite::types::Value { + rusqlite::types::Value::from(v) +} + +fn row_to_sql_value(row: &rusqlite::Row, idx: usize) -> Result { + let value: rusqlite::types::Value = + row.get(idx).map_err(|e| SqlError::Sqlite(e.to_string()))?; + Ok(SqlValue::from(value)) +} + +fn estimate_value_size(val: &SqlValue) -> usize { + match val { + SqlValue::Null => 4, // "null" + SqlValue::Integer(_) => 8, // up to 20 digits + SqlValue::Real(_) => 8, + SqlValue::Text(s) => s.len() + 2, // quotes + SqlValue::Blob(b) => b.len() * 2, // hex encoding overhead + } +} + +fn execute_query( + conn: &rusqlite::Connection, + sql: &str, + params: 
&[SqlValue], +) -> Result { + let mut stmt = conn + .prepare(sql) + .map_err(|e| SqlError::Sqlite(e.to_string()))?; + + let columns: Vec = stmt.column_names().into_iter().map(String::from).collect(); + + let rusqlite_params: Vec = + params.iter().map(sql_value_to_rusqlite).collect(); + let param_refs: Vec<&dyn rusqlite::types::ToSql> = rusqlite_params + .iter() + .map(|p| p as &dyn rusqlite::types::ToSql) + .collect(); + + let mut rows = Vec::new(); + let mut size_estimate: usize = 0; + + let mut query_rows = stmt + .query(param_refs.as_slice()) + .map_err(|e| SqlError::Sqlite(e.to_string()))?; + + while let Some(row) = query_rows + .next() + .map_err(|e| SqlError::Sqlite(e.to_string()))? + { + let mut values = Vec::new(); + for i in 0..columns.len() { + let val = row_to_sql_value(row, i)?; + size_estimate += estimate_value_size(&val); + values.push(val); + } + rows.push(values); + + if size_estimate > MAX_RESPONSE_SIZE { + return Err(SqlError::ResponseTooLarge(size_estimate as u64)); + } + } + + let row_count = rows.len(); + Ok(QueryResponse { + columns, + rows, + row_count, + }) +} + +fn execute_statement( + conn: &rusqlite::Connection, + sql: &str, + params: &[SqlValue], +) -> Result { + let rusqlite_params: Vec = + params.iter().map(sql_value_to_rusqlite).collect(); + let param_refs: Vec<&dyn rusqlite::types::ToSql> = rusqlite_params + .iter() + .map(|p| p as &dyn rusqlite::types::ToSql) + .collect(); + + conn.execute(sql, param_refs.as_slice()) + .map_err(|e| SqlError::Sqlite(e.to_string()))?; + + Ok(ExecuteResponse { + changes: conn.changes(), + last_insert_row_id: conn.last_insert_rowid(), + }) +} diff --git a/tinycloud-core/src/sql/mod.rs b/tinycloud-core/src/sql/mod.rs new file mode 100644 index 0000000..0fb35ae --- /dev/null +++ b/tinycloud-core/src/sql/mod.rs @@ -0,0 +1,13 @@ +pub mod authorizer; +pub mod caveats; +pub mod database; +pub mod parser; +pub mod service; +pub mod storage; +pub mod types; + +pub use caveats::SqlCaveats; +pub use 
service::SqlService; +pub use types::{ + BatchResponse, ExecuteResponse, QueryResponse, SqlError, SqlRequest, SqlResponse, SqlValue, +}; diff --git a/tinycloud-core/src/sql/parser.rs b/tinycloud-core/src/sql/parser.rs new file mode 100644 index 0000000..9faf021 --- /dev/null +++ b/tinycloud-core/src/sql/parser.rs @@ -0,0 +1,234 @@ +use sqlparser::ast::*; +use sqlparser::dialect::SQLiteDialect; +use sqlparser::parser::Parser; + +use super::caveats::SqlCaveats; +use super::types::SqlError; + +pub struct ParsedQuery { + pub statements: Vec, + pub referenced_tables: Vec, + pub referenced_columns: Vec, + pub is_read_only: bool, + pub is_ddl: bool, +} + +pub fn validate_sql( + sql: &str, + caveats: &Option, + ability: &str, +) -> Result { + let dialect = SQLiteDialect {}; + let statements = + Parser::parse_sql(&dialect, sql).map_err(|e| SqlError::ParseError(e.to_string()))?; + + if statements.is_empty() { + return Err(SqlError::ParseError("Empty SQL statement".to_string())); + } + + let mut tables = Vec::new(); + let mut columns = Vec::new(); + let mut is_read_only = true; + let mut is_ddl = false; + + for stmt in &statements { + match stmt { + Statement::Query(_) => { + extract_tables_from_statement(stmt, &mut tables); + extract_columns_from_statement(stmt, &mut columns); + } + Statement::Insert { .. } => { + is_read_only = false; + extract_tables_from_statement(stmt, &mut tables); + } + Statement::Update { .. } => { + is_read_only = false; + extract_tables_from_statement(stmt, &mut tables); + extract_columns_from_statement(stmt, &mut columns); + } + Statement::Delete { .. } => { + is_read_only = false; + extract_tables_from_statement(stmt, &mut tables); + } + Statement::CreateTable { .. } + | Statement::AlterTable { .. } + | Statement::Drop { .. } + | Statement::CreateIndex { .. } => { + is_read_only = false; + is_ddl = true; + extract_tables_from_statement(stmt, &mut tables); + } + Statement::AttachDatabase { .. 
} => { + return Err(SqlError::PermissionDenied( + "ATTACH is not allowed".to_string(), + )); + } + _ => { + return Err(SqlError::PermissionDenied(format!( + "Statement type not allowed: {}", + stmt + ))); + } + } + } + + // Validate ability vs operation type + if is_ddl + && !matches!( + ability, + "tinycloud.sql/admin" | "tinycloud.sql/write" | "tinycloud.sql/*" + ) + { + return Err(SqlError::PermissionDenied( + "DDL operations require admin or write ability".to_string(), + )); + } + + if !is_read_only && matches!(ability, "tinycloud.sql/read" | "tinycloud.sql/select") { + return Err(SqlError::ReadOnlyViolation); + } + + // Validate caveats + if let Some(caveats) = caveats { + if caveats.read_only.unwrap_or(false) && !is_read_only { + return Err(SqlError::ReadOnlyViolation); + } + + for table in &tables { + if !caveats.is_table_allowed(table) { + return Err(SqlError::PermissionDenied(format!( + "Access to table '{}' is not allowed", + table + ))); + } + } + + for column in &columns { + if !caveats.is_column_allowed(column) { + return Err(SqlError::PermissionDenied(format!( + "Access to column '{}' is not allowed", + column + ))); + } + } + } + + tables.dedup(); + columns.dedup(); + + Ok(ParsedQuery { + statements, + referenced_tables: tables, + referenced_columns: columns, + is_read_only, + is_ddl, + }) +} + +fn extract_tables_from_statement(stmt: &Statement, tables: &mut Vec) { + match stmt { + Statement::Query(query) => { + extract_tables_from_query(query, tables); + } + Statement::Insert { table_name, .. } => { + tables.push(table_name.to_string()); + } + Statement::Update { table, .. } => { + extract_tables_from_table_with_joins(table, tables); + } + Statement::Delete { from, .. } => match from { + FromTable::WithFromKeyword(from_items) | FromTable::WithoutKeyword(from_items) => { + for item in from_items { + extract_tables_from_table_with_joins(item, tables); + } + } + }, + Statement::CreateTable { name, .. 
} => { + tables.push(name.to_string()); + } + Statement::AlterTable { name, .. } => { + tables.push(name.to_string()); + } + Statement::Drop { names, .. } => { + for name in names { + tables.push(name.to_string()); + } + } + Statement::CreateIndex { table_name, .. } => { + tables.push(table_name.to_string()); + } + _ => {} + } +} + +fn extract_tables_from_query(query: &Query, tables: &mut Vec) { + extract_tables_from_set_expr(&query.body, tables); +} + +fn extract_tables_from_set_expr(body: &SetExpr, tables: &mut Vec) { + match body { + SetExpr::Select(select) => { + for item in &select.from { + extract_tables_from_table_with_joins(item, tables); + } + } + SetExpr::SetOperation { left, right, .. } => { + extract_tables_from_set_expr(left, tables); + extract_tables_from_set_expr(right, tables); + } + SetExpr::Query(query) => { + extract_tables_from_query(query, tables); + } + _ => {} + } +} + +fn extract_tables_from_table_with_joins(twj: &TableWithJoins, tables: &mut Vec) { + extract_tables_from_table_factor(&twj.relation, tables); + for join in &twj.joins { + extract_tables_from_table_factor(&join.relation, tables); + } +} + +fn extract_tables_from_table_factor(factor: &TableFactor, tables: &mut Vec) { + match factor { + TableFactor::Table { name, .. } => { + tables.push(name.to_string()); + } + TableFactor::Derived { subquery, .. } => { + extract_tables_from_query(subquery, tables); + } + TableFactor::NestedJoin { + table_with_joins, .. + } => { + extract_tables_from_table_with_joins(table_with_joins, tables); + } + _ => {} + } +} + +fn extract_columns_from_statement(stmt: &Statement, columns: &mut Vec) { + match stmt { + Statement::Query(query) => { + extract_columns_from_query(query, columns); + } + Statement::Update { assignments, .. 
} => { + for assignment in assignments { + for id in &assignment.id { + columns.push(id.value.clone()); + } + } + } + _ => {} + } +} + +fn extract_columns_from_query(query: &Query, columns: &mut Vec) { + if let SetExpr::Select(select) = &*query.body { + for item in &select.projection { + if let SelectItem::UnnamedExpr(Expr::Identifier(ident)) = item { + columns.push(ident.value.clone()); + } + } + } +} diff --git a/tinycloud-core/src/sql/service.rs b/tinycloud-core/src/sql/service.rs new file mode 100644 index 0000000..c03151e --- /dev/null +++ b/tinycloud-core/src/sql/service.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use dashmap::DashMap; +use tinycloud_lib::resource::SpaceId; + +use super::{ + caveats::SqlCaveats, + database::{spawn_actor, DatabaseHandle}, + types::*, +}; + +pub struct SqlService { + databases: Arc>, + base_path: String, + memory_threshold: u64, +} + +impl SqlService { + pub fn new(base_path: String, memory_threshold: u64) -> Self { + Self { + databases: Arc::new(DashMap::new()), + base_path, + memory_threshold, + } + } + + pub async fn execute( + &self, + space: &SpaceId, + db_name: &str, + request: SqlRequest, + caveats: Option, + ability: String, + ) -> Result { + let key = (space.to_string(), db_name.to_string()); + let handle = self + .databases + .entry(key) + .or_insert_with(|| { + spawn_actor( + space.to_string(), + db_name.to_string(), + self.base_path.clone(), + self.memory_threshold, + ) + }) + .clone(); + + handle.execute(request, caveats, ability).await + } + + pub async fn export(&self, space: &SpaceId, db_name: &str) -> Result, SqlError> { + let path = std::path::PathBuf::from(&self.base_path) + .join(space.to_string()) + .join(format!("{}.db", db_name)); + + if !path.exists() { + return Err(SqlError::DatabaseNotFound); + } + + std::fs::read(&path).map_err(|e| SqlError::Internal(e.to_string())) + } + + pub fn db_name_from_path(path: Option<&str>) -> String { + path.map(|p| 
p.split('/').next_back().unwrap_or("default").to_string()) + .unwrap_or_else(|| "default".to_string()) + } +} diff --git a/tinycloud-core/src/sql/storage.rs b/tinycloud-core/src/sql/storage.rs new file mode 100644 index 0000000..f598e01 --- /dev/null +++ b/tinycloud-core/src/sql/storage.rs @@ -0,0 +1,74 @@ +use std::path::PathBuf; + +use rusqlite::Connection; + +use super::types::SqlError; + +#[derive(Debug, Clone)] +pub enum StorageMode { + InMemory, + File(PathBuf), +} + +pub fn open_connection(mode: &StorageMode) -> Result { + let conn = match mode { + StorageMode::InMemory => { + Connection::open_in_memory().map_err(|e| SqlError::Internal(e.to_string()))? + } + StorageMode::File(path) => { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).map_err(|e| SqlError::Internal(e.to_string()))?; + } + Connection::open(path).map_err(|e| SqlError::Internal(e.to_string()))? + } + }; + + // Enable WAL mode for file-backed databases + if matches!(mode, StorageMode::File(_)) { + conn.pragma_update(None, "journal_mode", "wal") + .map_err(|e| SqlError::Internal(e.to_string()))?; + } + + // Enable foreign keys + conn.pragma_update(None, "foreign_keys", "ON") + .map_err(|e| SqlError::Internal(e.to_string()))?; + + Ok(conn) +} + +pub fn database_size(conn: &Connection) -> Result { + let page_count: u64 = conn + .pragma_query_value(None, "page_count", |row| row.get(0)) + .map_err(|e| SqlError::Internal(e.to_string()))?; + let page_size: u64 = conn + .pragma_query_value(None, "page_size", |row| row.get(0)) + .map_err(|e| SqlError::Internal(e.to_string()))?; + Ok(page_count * page_size) +} + +pub fn promote_to_file(conn: &Connection, path: &PathBuf) -> Result { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).map_err(|e| SqlError::Internal(e.to_string()))?; + } + + let mut file_conn = Connection::open(path).map_err(|e| SqlError::Internal(e.to_string()))?; + + // Use SQLite backup API + { + let backup = 
// tinycloud-core/src/sql/types.rs
//
// Wire types for the SQL service: request/response payloads, the SQLite
// value model, and the service error enum.
//
// NOTE(review): reconstructed from a diff with generic parameters
// stripped; `Execute::schema` and `BatchResponse::results` element types
// are inferred — confirm against the client SDK before merging.

use serde::{Deserialize, Serialize};

/// A request against a space database, discriminated by `"action"` in
/// the JSON payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "action")]
pub enum SqlRequest {
    /// Read-only query returning rows.
    #[serde(rename = "query")]
    Query {
        sql: String,
        #[serde(default)]
        params: Vec<SqlValue>,
    },
    /// Single write statement.
    #[serde(rename = "execute")]
    Execute {
        sql: String,
        #[serde(default)]
        params: Vec<SqlValue>,
        // TODO(review): element type inferred (`Option<Vec<…>>` in the
        // original); verify against the SDK's request shape.
        #[serde(default)]
        schema: Option<Vec<String>>,
    },
    /// Several statements executed as one batch.
    #[serde(rename = "batch")]
    Batch { statements: Vec<SqlStatement> },
    /// Run a previously registered named statement.
    #[serde(rename = "executeStatement")]
    ExecuteStatement {
        name: String,
        #[serde(default)]
        params: Vec<SqlValue>,
    },
    /// Export the raw database file.
    #[serde(rename = "export")]
    Export,
}

/// One SQL statement plus its bound parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SqlStatement {
    pub sql: String,
    #[serde(default)]
    pub params: Vec<SqlValue>,
}

/// The five SQLite storage classes.
#[derive(Debug, Clone)]
pub enum SqlValue {
    Null,
    Integer(i64),
    Real(f64),
    Text(String),
    Blob(Vec<u8>),
}

impl Serialize for SqlValue {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        match self {
            SqlValue::Null => serializer.serialize_none(),
            SqlValue::Integer(i) => serializer.serialize_i64(*i),
            SqlValue::Real(f) => serializer.serialize_f64(*f),
            SqlValue::Text(s) => serializer.serialize_str(s),
            SqlValue::Blob(b) => serializer.serialize_bytes(b),
        }
    }
}

impl<'de> Deserialize<'de> for SqlValue {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        struct SqlValueVisitor;

        impl<'de> serde::de::Visitor<'de> for SqlValueVisitor {
            type Value = SqlValue;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("a SQL value (null, integer, real, string, or byte array)")
            }

            fn visit_unit<E: serde::de::Error>(self) -> Result<Self::Value, E> {
                Ok(SqlValue::Null)
            }

            fn visit_none<E: serde::de::Error>(self) -> Result<Self::Value, E> {
                Ok(SqlValue::Null)
            }

            fn visit_some<D: serde::Deserializer<'de>>(
                self,
                deserializer: D,
            ) -> Result<Self::Value, D::Error> {
                Deserialize::deserialize(deserializer)
            }

            // Booleans map onto SQLite's 0/1 integer convention.
            fn visit_bool<E: serde::de::Error>(self, v: bool) -> Result<Self::Value, E> {
                Ok(SqlValue::Integer(if v { 1 } else { 0 }))
            }

            fn visit_i64<E: serde::de::Error>(self, v: i64) -> Result<Self::Value, E> {
                Ok(SqlValue::Integer(v))
            }

            // Reject u64 values that don't fit in i64 instead of silently
            // wrapping to a negative number (previously `v as i64`).
            fn visit_u64<E: serde::de::Error>(self, v: u64) -> Result<Self::Value, E> {
                i64::try_from(v)
                    .map(SqlValue::Integer)
                    .map_err(|_| E::custom(format!("integer {} out of range for SQLite", v)))
            }

            fn visit_f64<E: serde::de::Error>(self, v: f64) -> Result<Self::Value, E> {
                Ok(SqlValue::Real(v))
            }

            fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
                Ok(SqlValue::Text(v.to_string()))
            }

            fn visit_string<E: serde::de::Error>(self, v: String) -> Result<Self::Value, E> {
                Ok(SqlValue::Text(v))
            }

            fn visit_bytes<E: serde::de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
                Ok(SqlValue::Blob(v.to_vec()))
            }

            fn visit_byte_buf<E: serde::de::Error>(self, v: Vec<u8>) -> Result<Self::Value, E> {
                Ok(SqlValue::Blob(v))
            }

            // JSON has no bytes type, so blobs arrive as arrays of u8.
            fn visit_seq<A: serde::de::SeqAccess<'de>>(
                self,
                mut seq: A,
            ) -> Result<Self::Value, A::Error> {
                let mut bytes = Vec::new();
                while let Some(byte) = seq.next_element::<u8>()? {
                    bytes.push(byte);
                }
                Ok(SqlValue::Blob(bytes))
            }
        }

        deserializer.deserialize_any(SqlValueVisitor)
    }
}

impl From<rusqlite::types::Value> for SqlValue {
    fn from(v: rusqlite::types::Value) -> Self {
        match v {
            rusqlite::types::Value::Null => SqlValue::Null,
            rusqlite::types::Value::Integer(i) => SqlValue::Integer(i),
            rusqlite::types::Value::Real(f) => SqlValue::Real(f),
            rusqlite::types::Value::Text(s) => SqlValue::Text(s),
            rusqlite::types::Value::Blob(b) => SqlValue::Blob(b),
        }
    }
}

impl From<&SqlValue> for rusqlite::types::Value {
    fn from(v: &SqlValue) -> Self {
        match v {
            SqlValue::Null => rusqlite::types::Value::Null,
            SqlValue::Integer(i) => rusqlite::types::Value::Integer(*i),
            SqlValue::Real(f) => rusqlite::types::Value::Real(*f),
            SqlValue::Text(s) => rusqlite::types::Value::Text(s.clone()),
            SqlValue::Blob(b) => rusqlite::types::Value::Blob(b.clone()),
        }
    }
}

/// Response payload; untagged, so the variant is inferred from shape.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum SqlResponse {
    Query(QueryResponse),
    Execute(ExecuteResponse),
    Batch(BatchResponse),
}

/// Rows returned by a query.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct QueryResponse {
    pub columns: Vec<String>,
    pub rows: Vec<Vec<SqlValue>>,
    pub row_count: usize,
}

/// Result of a write statement.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecuteResponse {
    pub changes: u64,
    pub last_insert_row_id: i64,
}

/// Per-statement results of a batch.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchResponse {
    // TODO(review): element type inferred from the mangled diff; could
    // also be `Vec<ExecuteResponse>` — confirm against the actor.
    pub results: Vec<SqlResponse>,
}

/// Errors surfaced by the SQL service.
#[derive(Debug, thiserror::Error)]
pub enum SqlError {
    #[error("SQLite error: {0}")]
    Sqlite(String),
    #[error("Permission denied: {0}")]
    PermissionDenied(String),
    #[error("Database not found")]
    DatabaseNotFound,
    #[error("Response too large: {0} bytes")]
    ResponseTooLarge(u64),
    #[error("Quota exceeded")]
    QuotaExceeded,
    #[error("Invalid statement: {0}")]
    InvalidStatement(String),
    #[error("Schema error: {0}")]
    SchemaError(String),
    #[error("Read-only violation")]
    ReadOnlyViolation,
    #[error("Parse error: {0}")]
    ParseError(String),
    #[error("Internal error: {0}")]
    Internal(String),
}