Rust Client
Getting Started
Installation
cargo add solidb-client
Requirements: Rust 1.56 or higher. The client uses tokio for async runtime and rmp-serde for MessagePack serialization.
Quick Start
use solidb_client::{SoliDBClient, SoliDBClientBuilder};
use serde_json::json;
#[tokio::main]
async fn main() -> Result<(), solidb_client::DriverError> {
// Create client using builder for automatic auth
let mut client = SoliDBClientBuilder::new("localhost:6745")
.auth("mydb", "admin", "password")
.timeout_ms(5000)
.build()
.await?;
// Or connect manually
// let mut client = SoliDBClient::connect("localhost:6745").await?;
// client.auth("mydb", "admin", "password").await?;
// Basic CRUD operations
let doc = client.insert("mydb", "users", None, json!({
"name": "Alice",
"age": 30,
})).await?;
println!("Created: {}", doc["_key"]);
let user = client.get("mydb", "users", doc["_key"].as_str().unwrap()).await?;
println!("Retrieved: {}", user["name"]);
client.update("mydb", "users", doc["_key"].as_str().unwrap(), json!({
"age": 31,
}), true).await?;
// Query with SDBQL
let results = client.query(
"mydb",
"FOR u IN users FILTER u.age > @min RETURN u",
Some(vec![("min", json!(25))].into_iter().collect())
).await?;
println!("Found {} users", results.len());
// Ping for health check (returns server timestamp)
let timestamp = client.ping().await?;
println!("Server active at: {}", timestamp);
Ok(())
}
Connection Management
// Connect with address string (host:port)
let mut client = SoliDBClient::connect("localhost:6745").await?;
// Check connection health
let timestamp = client.ping().await?;
println!("Server active at: {}", timestamp);
// Connection is automatically closed when client is dropped
// or use explicit cleanup
drop(client);
| Method | Returns | Description |
|---|---|---|
connect(addr) | Result<Self> | Create and connect client |
ping() | Result<i64> | Check connection health |
Authentication
// Authenticate with database, username, and password
client.auth("mydb", "admin", "password").await?;
// Or use builder for automatic authentication
let mut client = SoliDBClientBuilder::new("localhost:6745")
.auth("mydb", "admin", "password")
.build()
.await?;
// Authentication is required for most operations
// The session remains authenticated until disconnected
Core Operations
Database Operations
// Note: Database operations are handled at connection/auth time
// The client uses the database specified during auth()
// List databases (requires _system access)
let databases = client.list_databases().await?;
// => vec!["_system", "mydb", "testdb"]
// Create a new database
client.create_database("analytics").await?;
// Delete a database
client.delete_database("old_db").await?;
| Method | Returns | Description |
|---|---|---|
list_databases() | Result<Vec<String>> | List all database names |
create_database(name) | Result<()> | Create new database |
delete_database(name) | Result<()> | Delete database |
Collection Operations
// List collections in a database
let collections = client.list_collections("mydb").await?;
// => vec!["users", "orders", "products"]
// Create a document collection
client.create_collection("mydb", "products", None).await?;
// Create an edge collection (for graphs)
client.create_collection("mydb", "relationships", Some("edge")).await?;
// Delete a collection
client.delete_collection("mydb", "old_collection").await?;
// Get collection statistics
let stats = client.collection_stats("mydb", "users").await?;
println!("Collection count: {}", stats["document_count"]);
| Method | Returns | Description |
|---|---|---|
list_collections(db) | Result<Vec<String>> | List collections in database |
create_collection(db, name, *type) | Result<()> | Create collection (type: None/"edge") |
delete_collection(db, name) | Result<()> | Delete collection |
collection_stats(db, name) | Result<Value> | Get collection statistics |
Document Operations (CRUD)
use serde_json::json;
// INSERT - Create a new document (auto-generated key)
let doc = client.insert("mydb", "users", None, json!({
"name": "Alice",
"email": "[email protected]",
"age": 30,
})).await?;
println!("{}", doc["_key"]); // Auto-generated key
// INSERT with custom key
let doc = client.insert("mydb", "users", Some("custom-key-123"), json!({
"name": "Bob",
})).await?;
// GET - Retrieve a document by key
let user = client.get("mydb", "users", "custom-key-123").await?;
// => json!({"_key": "custom-key-123", "name": "Bob", ...})
// UPDATE - Modify a document (merge = true for partial update)
client.update("mydb", "users", "custom-key-123", json!({
"age": 25,
}), true).await?;
// UPDATE - Replace entire document (merge = false)
client.update("mydb", "users", "custom-key-123", json!({
"name": "Robert",
"email": "[email protected]",
"age": 30,
}), false).await?;
// DELETE - Remove a document
client.delete("mydb", "users", "custom-key-123").await?;
// LIST - Paginated document listing
let (docs, total) = client.list("mydb", "users", Some(50), Some(0)).await?;
// limit: 50, offset: 0, returns (documents, total_count)
| Method | Returns | Description |
|---|---|---|
insert(db, col, key, doc) | Result<Value> | Insert document, returns doc with _key |
get(db, col, key) | Result<Value> | Get document by key |
update(db, col, key, doc, merge) | Result<Value> | Update document (merge or replace) |
delete(db, col, key) | Result<()> | Delete document |
list(db, col, limit, offset) | Result<(Vec<Value>, usize)> | List documents with pagination |
SDBQL Queries
use std::collections::HashMap;
use serde_json::json;
// Simple query
let users = client.query("mydb", "FOR u IN users RETURN u", None).await?;
// Query with bind variables (recommended for security)
let mut bind_vars = HashMap::new();
bind_vars.insert("min_age".to_string(), json!(18));
bind_vars.insert("status".to_string(), json!("active"));
bind_vars.insert("limit".to_string(), json!(100));
let results = client.query("mydb", r#"
FOR u IN users
FILTER u.age >= @min_age AND u.status == @status
SORT u.created_at DESC
LIMIT @limit
RETURN { name: u.name, email: u.email }
"#,
Some(bind_vars)
).await?;
// Aggregation query
let stats = client.query("mydb", r#"
FOR u IN users
COLLECT status = u.status WITH COUNT INTO count
RETURN { status, count }
"#, None).await?;
// Join query
let orders = client.query("mydb", r#"
FOR o IN orders
FOR u IN users FILTER u._key == o.user_id
RETURN { order: o, user: u.name }
"#, None).await?;
// Explain query plan
let plan = client.explain("mydb", "FOR u IN users RETURN u", None).await?;
ACID Transactions
use solidb_client::IsolationLevel;
// Begin a transaction
let tx_id = client.begin_transaction("mydb", None).await?;
let tx_id = client.begin_transaction("mydb", Some(IsolationLevel::ReadCommitted)).await?;
// Isolation levels: ReadUncommitted, ReadCommitted, RepeatableRead, Serializable
// Perform operations within transaction
let _ = client.insert("mydb", "accounts", None, json!({
"id": 1, "balance": 1000,
})).await?;
let _ = client.insert("mydb", "accounts", None, json!({
"id": 2, "balance": 500,
})).await?;
// Check transaction state
println!("In transaction: {}", client.in_transaction());
println!("Transaction ID: {:?}", client.transaction_id());
// Commit if all operations succeed
client.commit().await?;
println!("Transaction committed successfully");
// Or rollback on error
// client.rollback().await?;
| Method | Returns | Description |
|---|---|---|
begin_transaction(db, *isolation) | Result<String> | Start transaction, returns tx_id |
commit() | Result<()> | Commit transaction |
rollback() | Result<()> | Rollback transaction |
in_transaction() | bool | Check if in transaction |
transaction_id() | Option<&str> | Get current transaction ID |
Management Sub-Clients
Sub-clients provide namespaced access to management APIs. Important: All management operations require database context.
client.scripts()
Lua Script Endpoints
// Create a Lua script endpoint
let script = client.scripts().create(
"hello", // name
"/api/hello", // path
vec!["GET", "POST"], // methods
r#"return { message = "Hello!" }"#, // code
None, // description (optional)
None, // collection (optional)
).await?;
println!("Created script: {}", script["_key"]);
// List all scripts
let scripts = client.scripts().list().await?;
for s in &scripts {
println!("{} -> {}", s["name"], s["path"]);
}
// Get a specific script
let script = client.scripts().get("script_key").await?;
// Update script
client.scripts().update("script_key", json!({
"code": r#"return { message: "Updated!" }"#,
"methods": vec!["GET"],
})).await?;
// Delete a script
client.scripts().delete("script_key").await?;
// Get execution statistics
let stats = client.scripts().get_stats().await?;
| Method | Parameters | Description |
|---|---|---|
create | name, path, methods, code, *desc, *col | Create Lua endpoint |
list() | - | List all scripts |
get(scriptID) | scriptID | Get script details |
update(scriptID, updates) | scriptID, Value | Update script properties |
delete(scriptID) | scriptID | Delete script |
get_stats() | - | Execution statistics |
client.jobs() & client.cron()
Background Processing
// === JOBS ===
// List all queues
let queues = client.jobs().list_queues().await?;
// => vec![QueueInfo { name: "default", pending: 5, running: 2 }, ...]
// List jobs in a queue with filters
let jobs = client.jobs().list_jobs(
"default", // queue name
None, // status filter (pending, running, completed, failed)
None, // limit
None, // offset
).await?;
// Enqueue a new job
let job = client.jobs().enqueue(
"default", // queue name
"/scripts/process-order", // script path
json!({"order_id": 123}), // params
Some(10), // priority (higher = more urgent)
None, // run_at (ISO8601 for delayed)
).await?;
println!("Job ID: {}", job["_key"]);
// Get job details
let job = client.jobs().get_job("job_id").await?;
println!("Status: {}", job["status"]);
// Cancel a pending job
client.jobs().cancel("job_id").await?;
// === CRON ===
// List scheduled jobs
let crons = client.cron().list().await?;
// Create a cron job
let cron = client.cron().create(
"daily-cleanup", // name
"0 2 * * *", // schedule (every day at 2 AM)
"/scripts/cleanup", // script path
None, // params
None, // description
).await?;
// Update cron schedule
client.cron().update("cron_id", json!({
"schedule": "0 3 * * *", // change to 3 AM
})).await?;
// Delete cron job
client.cron().delete("cron_id").await?;
client.triggers()
Database Triggers
// List all triggers
let triggers = client.triggers().list().await?;
// List triggers for a specific collection
let triggers = client.triggers().list_by_collection("users").await?;
// Create a trigger
let trigger = client.triggers().create(
"on_user_created", // name
"users", // collection
"insert", // operation: insert, update, delete
"/scripts/on-user-create", // script path
None, // description
).await?;
// Get trigger details
let trigger = client.triggers().get("trigger_id").await?;
// Update trigger
client.triggers().update("trigger_id", json!({
"script_path": "/scripts/new-handler",
"enabled": false,
})).await?;
// Toggle trigger on/off
client.triggers().toggle("trigger_id").await?;
// Delete trigger
client.triggers().delete("trigger_id").await?;
| Event | Description |
|---|---|
insert | Fires on document creation |
update | Fires on document modification |
delete | Fires on document removal |
client.roles() & client.users()
Role-Based Access Control
// === ROLES ===
// List all roles
let roles = client.roles().list().await?;
// Create a role with permissions
let role = client.roles().create(
"editor",
vec![
json!({"action": "read", "scope": "database", "database": "mydb"}),
json!({"action": "write", "scope": "collection", "database": "mydb", "collection": "articles"}),
json!({"action": "execute", "scope": "script", "database": "mydb"}),
],
None, // description
).await?;
// Get role details
let role = client.roles().get("editor").await?;
// Update role permissions
client.roles().update("editor", vec![
json!({"action": "read", "scope": "database", "database": "mydb"}),
json!({"action": "write", "scope": "database", "database": "mydb"}),
], None).await?;
// Delete role
client.roles().delete("editor").await?;
// === USERS ===
// List all users
let users = client.users().list().await?;
// Create a user
let user = client.users().create("john", "secure_password", None).await?;
// Get user's assigned roles
let roles = client.users().get_roles("john").await?;
// Assign a role to user
client.users().assign_role("john", "editor").await?;
// Revoke a role from user
client.users().revoke_role("john", "editor").await?;
// Get current authenticated user
let me = client.users().me().await?;
// Get current user's permissions
let permissions = client.users().my_permissions().await?;
// Delete user
client.users().delete("john").await?;
| Action | Scopes | Description |
|---|---|---|
read | database, collection | Read documents and query |
write | database, collection | Create, update, delete documents |
admin | database, collection | Manage indexes, schema, etc. |
execute | script | Execute Lua scripts |
client.api_keys()
API Key Management
// Create API key
let key = client.api_keys().create(
"my-api-key", // name
vec!["mydb"], // databases
None, // expiration
).await?;
println!("API Key: {}", key["key"]); // Save this! Only shown once
// List all API keys
let keys = client.api_keys().list().await?;
// Delete API key
client.api_keys().delete("key_id").await?;
Advanced Features
client.vector()
Vector Search & AI
// Create a vector index
let metric = "cosine".to_string(); // cosine, euclidean, dot_product
let idx = client.vector().create_index(
"products", // collection
"product_embeddings", // index name
"embedding", // field
1536, // dimensions
Some(&metric),
).await?;
// Search by vector (semantic search)
let embedding = get_embedding("wireless headphones").await; // Your embedding function
let results = client.vector().search(
"products",
&embedding,
10, // limit
None, // filter
).await?;
for r in &results {
let doc = &r["doc"];
println!("{} - Score: {}", doc["name"], r["score"]);
}
// Search by existing document (find similar)
let similar = client.vector().search_by_document(
"products",
"product-123", // doc key
"embedding", // field
5, // limit
None, // filter
).await?;
// Quantize index (reduce memory usage)
client.vector().quantize("products", "product_embeddings", "binary").await?;
// Dequantize (restore full precision)
client.vector().dequantize("products", "product_embeddings").await?;
// List vector indexes
let indexes = client.vector().list_indexes("products").await?;
// Delete index
client.vector().delete_index("products", "product_embeddings").await?;
client.geo()
Geospatial Queries
// Create a geo index
let idx = client.geo().create_index("stores", "location_idx", "location").await?;
// Find nearby locations (radius search)
let nearby = client.geo().near(
"stores",
48.8566, // latitude
2.3522, // longitude
5000, // radius in meters
20, // limit
).await?;
for r in &nearby {
let doc = &r["doc"];
println!("{} - {}m away", doc["name"], r["distance"]);
}
// Find within polygon
let polygon = vec![
vec![48.8, 2.3],
vec![48.9, 2.4],
vec![48.85, 2.35],
vec![48.8, 2.3],
];
let within = client.geo().within("stores", &polygon, None).await?;
// List geo indexes
let indexes = client.geo().list_indexes("stores").await?;
// Delete index
client.geo().delete_index("stores", "location_idx").await?;
client.ttl()
Time-To-Live Indexes
// Create TTL index (auto-expire documents after 1 hour)
let idx = client.ttl().create_index(
"sessions", // collection
"session_ttl", // index name
"created_at", // DateTime field to check
3600, // expire after seconds
).await?;
// Update expiration time
client.ttl().update_expiration("sessions", "session_ttl", 7200).await?; // 2 hours
// Get index info
let info = client.ttl().get_index_info("sessions", "session_ttl").await?;
println!("Expires after: {}s", info["expire_after_seconds"]);
// Manually trigger cleanup (normally runs automatically)
client.ttl().run_cleanup("sessions").await?;
// List TTL indexes
let indexes = client.ttl().list_indexes("sessions").await?;
// Delete TTL index
client.ttl().delete_index("sessions", "session_ttl").await?;
client.columnar()
Columnar/Analytics Storage
// Create a columnar table (optimized for analytics)
let table = client.columnar().create("metrics", vec![
json!({"name": "timestamp", "type": "datetime"}),
json!({"name": "metric_name", "type": "string"}),
json!({"name": "value", "type": "float"}),
json!({"name": "tags", "type": "string"}),
]).await?;
// Insert rows (batch insert is efficient)
client.columnar().insert("metrics", vec![
json!({"timestamp": "2024-01-15T10:00:00Z", "metric_name": "cpu_usage", "value": 45.2, "tags": "server1"}),
json!({"timestamp": "2024-01-15T10:01:00Z", "metric_name": "cpu_usage", "value": 47.8, "tags": "server1"}),
]).await?;
// Query with SQL-like syntax
let results = client.columnar().query("metrics",
"SELECT * FROM metrics WHERE value > @min ORDER BY timestamp DESC LIMIT 100",
Some(vec![("min", json!(40.0))].into_iter().collect()),
).await?;
// Aggregation
let agg = client.columnar().aggregate("metrics", json!({
"group_by": vec!["metric_name", "tags"],
"metrics": vec![
json!({"column": "value", "function": "avg"}),
json!({"column": "value", "function": "max"}),
json!({"column": "value", "function": "count"}),
],
})).await?;
// Get table statistics
let stats = client.columnar().stats("metrics").await?;
println!("Row count: {}", stats["row_count"]);
// Add a column
client.columnar().add_column("metrics", "host", "string", None).await?;
// Drop a column
client.columnar().drop_column("metrics", "host").await?;
// List all columnar tables
let tables = client.columnar().list().await?;
// Delete table
client.columnar().delete("metrics").await?;
client.cluster()
Cluster Management
// Get cluster status
let status = client.cluster().status().await?;
println!("Mode: {}", status["mode"]); // standalone, cluster
println!("Nodes: {}", status["node_count"]);
// Get detailed cluster info
let info = client.cluster().info().await?;
// Remove a node from cluster
client.cluster().remove_node("node-id-to-remove").await?;
// Trigger data rebalancing
client.cluster().rebalance().await?;
// Cleanup orphaned data
client.cluster().cleanup().await?;
// Reshard cluster
client.cluster().reshard(None).await?; // or specify new shard count
client.collections()
Advanced Collection Operations
// Truncate collection (delete all documents)
client.collections().truncate("logs").await?;
// Compact collection (reclaim disk space)
client.collections().compact("users").await?;
// Repair collection (fix inconsistencies)
client.collections().repair("orders").await?;
// Get collection statistics
let stats = client.collections().stats("users").await?;
// Set JSON schema validation
client.collections().set_schema("users", json!({
"type": "object",
"required": vec!["name", "email"],
"properties": json!({
"name": json!({"type": "string", "minLength": 1}),
"email": json!({"type": "string", "format": "email"}),
}),
})).await?;
// Get current schema
let schema = client.collections().get_schema("users").await?;
// Remove schema validation
client.collections().delete_schema("users").await?;
// Export collection
let data = client.collections().export("users", None).await?;
// Import data
client.collections().import("users_backup", &data, None).await?;
client.env()
Environment Variables
// List environment variables (for Lua scripts)
let vars = client.env().list().await?;
// Set an environment variable
client.env().set("API_KEY", "sk-xxx-your-api-key").await?;
client.env().set("WEBHOOK_URL", "https://example.com/webhook").await?;
// Delete an environment variable
client.env().delete("OLD_VAR").await?;
Error Handling
use solidb_client::{SoliDBClient, DriverError};
#[tokio::main]
async fn main() {
let result = run_operations().await;
match result {
Ok(_) => println!("All operations completed successfully"),
Err(e) => {
match e {
DriverError::ConnectionError(msg) => {
println!("Connection failed: {}", msg);
}
DriverError::ProtocolError(msg) => {
println!("Protocol error: {}", msg);
}
DriverError::DatabaseError(msg) => {
println!("Database error: {}", msg);
}
DriverError::AuthError(msg) => {
println!("Authentication failed: {}", msg);
}
DriverError::TransactionError(msg) => {
println!("Transaction error: {}", msg);
}
DriverError::MessageTooLarge => {
println!("Message exceeds maximum size limit");
}
DriverError::InvalidCommand(msg) => {
println!("Invalid command: {}", msg);
}
}
}
}
}
async fn run_operations() -> Result<(), DriverError> {
let mut client = SoliDBClient::connect("localhost:6745").await?;
// Connection errors
if let Err(e) = client.ping().await {
return Err(e);
}
// Authenticate
if let Err(e) = client.auth("mydb", "user", "password").await {
return Err(e);
}
// Document not found or other server errors
match client.get("mydb", "users", "nonexistent-key").await {
Ok(doc) => println!("Found: {:?}", doc),
Err(DriverError::DatabaseError(msg)) => {
println!("Document not found: {}", msg);
}
Err(e) => return Err(e),
}
Ok(())
}
ConnectionError
Network failures, connection refused, timeouts, disconnections
ProtocolError
Invalid response format, message too large, serialization issues
DatabaseError
Document not found, permission denied, validation errors
AuthError
Invalid credentials, expired session, insufficient permissions
TransactionError
No active transaction, commit/rollback failures
InvalidCommand
Unsupported operation, malformed request
Offline Sync
Build resilient applications that work anywhere. The Rust client provides offline-first capabilities with automatic synchronization, conflict resolution, and local storage using SQLite or RocksDB.
Introduction
Offline Sync enables local-first data operations. All reads and writes happen locally first, then sync automatically when connectivity is available. The sync manager handles version vectors, conflict resolution, and delta encoding transparently.
Works Offline
All operations work locally without network connectivity
Instant Response
Zero-latency reads and writes from local storage
Auto Conflict Resolution
Version vectors ensure data consistency across devices
Setup
use solidb_client::{SoliDBClient, OfflineSyncManager, SyncConfig, ConflictStrategy, StorageEngine};
use serde_json::json;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create regular client
let client = SoliDBClient::connect("localhost:6745").await?;
// Configure offline sync
let config = SyncConfig {
database: "mydb".to_string(),
local_storage_path: "./local_data".to_string(),
auto_sync: true,
sync_interval_seconds: 30,
conflict_strategy: ConflictStrategy::LastWriteWins,
storage_engine: StorageEngine::Sqlite,
encryption_key: None,
};
// Initialize sync manager
let sync_manager = OfflineSyncManager::new(client, config)
.initialize()
.await?;
// Authenticate (cached for reconnects)
sync_manager.authenticate("admin", "password").await?;
println!("Offline sync ready!");
Ok(())
}
Basic Operations
All CRUD operations work on local data first. Changes are queued for sync automatically when online.
// Insert document locally
let doc = sync_manager.save_document(
"users",
Some("user-123"),
json!({
"name": "Alice",
"email": "[email protected]",
"created_at": "2024-01-15T10:00:00Z"
})
).await?;
// Get document from local store
let user = sync_manager.get_document("users", "user-123").await?;
// Update document (merge mode)
let updated = sync_manager.save_document(
"users",
Some("user-123"),
json!({"email": "[email protected]"})
).await?;
// Delete document locally
sync_manager.delete_document("users", "user-123").await?;
// Query local data with SDBQL
let active_users = sync_manager.query(
"FOR u IN users FILTER u.status == 'active' RETURN u",
None
).await?;
// List documents with pagination
let (docs, total) = sync_manager.list_documents(
"users",
Some(50),
Some(0)
).await?;
| Method | Returns | Description |
|---|---|---|
save_document(col, key, doc) | Result<Value> | Insert or update document locally |
get_document(col, key) | Result<Value> | Get document from local store |
delete_document(col, key) | Result<()> | Delete document locally |
query(sdbql, params) | Result<Vec<Value>> | Execute SDBQL on local data |
list_documents(col, limit, offset) | Result<(Vec<Value>, usize)> | List documents with pagination
Sync Control
While auto-sync handles most cases, you have full control over synchronization and can subscribe to changes.
use solidb_client::{Change, ChangeType};
// Trigger immediate sync
let result = sync_manager.sync_now().await?;
println!("Synced: {} pushed, {} pulled", result.pushed, result.pulled);
// Check sync status
let status = sync_manager.sync_status().await;
println!("Pending: {}, Last sync: {:?}", status.pending_changes, status.last_sync);
// Subscribe to all changes
sync_manager.on_change(|change: Change| {
match change.change_type {
ChangeType::LocalInsert => println!("Local insert: {}", change.document_key),
ChangeType::RemoteInsert => println!("Remote insert: {}", change.document_key),
ChangeType::Conflict => println!("Conflict resolved: {}", change.document_key),
ChangeType::SyncComplete => println!("Sync done: {} up, {} down", change.pushed_count, change.pulled_count),
_ => {}
}
});
// Subscribe to specific collection
sync_manager.on_collection_change("users", |change| {
println!("User changed: {:?}", change);
});
// Pause/resume auto-sync
sync_manager.pause_sync().await?;
// ... do batch work ...
sync_manager.resume_sync().await?;
Conflict Resolution
When the same document is modified on multiple clients, conflicts are automatically resolved using your configured strategy. You can also handle conflicts manually.
// Configure conflict strategy in SyncConfig
let config = SyncConfig {
conflict_strategy: ConflictStrategy::LastWriteWins, // or FirstWriteWins, MergeFields
..Default::default()
};
// Custom conflict resolver
let config = SyncConfig {
conflict_strategy: ConflictStrategy::Custom(Box::new(|local, remote, base| {
let mut resolved = local.clone();
if remote["timestamp"] > local["timestamp"] {
resolved["status"] = remote["status"].clone();
}
Ok(resolved)
})),
..Default::default()
};
// Handle conflicts after sync
let result = sync_manager.sync_now().await?;
if !result.conflicts.is_empty() {
for conflict in &result.conflicts {
println!("Conflict in {}:{}", conflict.collection, conflict.document_key);
// Access local_version, remote_version, base_version
// Manually resolve if needed
}
}
// Set up conflict callback
sync_manager.on_conflict(|conflict| {
// Show UI to user or auto-resolve
ConflictResolution::AcceptRemote // or AcceptLocal, Merge(doc)
});
Configuration Options
SyncConfig {
// Required
database: "mydb".to_string(),
local_storage_path: "./sync_data".to_string(),
// Sync behavior
auto_sync: true, // Enable automatic sync
sync_interval_seconds: 60, // Auto-sync interval
push_on_change: true, // Sync immediately on local changes
// Conflict resolution
conflict_strategy: ConflictStrategy::LastWriteWins,
// Options: LastWriteWins, FirstWriteWins, MergeFields, Custom(...)
// Storage
storage_engine: StorageEngine::Sqlite, // or RocksDB
// Selective sync - only sync matching documents
sync_filter: Some("FOR doc IN users FILTER doc.user_id == @id RETURN doc".to_string()),
sync_filter_params: Some(vec![("id", json!("user-123"))].into_iter().collect()),
// Encryption for local storage (AES-256-GCM)
encryption_key: Some("your-32-byte-secret-key-here!!".to_string()),
}
Tip: Use selective sync to limit local storage usage. Only sync data the current user needs, such as their own documents or recent records.