Add MongoDB support and CLI for migrations and seed

This commit is contained in:
2025-08-19 16:50:19 -04:00
parent 8a05f4edac
commit 899cd073c0
14 changed files with 1710 additions and 52 deletions

2
.env
View File

@@ -2,4 +2,4 @@
RUST_LOG=info
BIND_ADDRESS=127.0.0.1:3000
# DATABASE_URL=postgres://gerard@localhost/db (not used yet)
MONGODB_URI=mongodb://localhost:27017

1338
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -5,7 +5,13 @@ edition = "2024"
[dependencies]
axum = "0.8.4"
base64 = "0.22.1"
bson = { version = "2.15.0", features = ["chrono-0_4"] }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.45", features = ["derive"] }
dotenvy = "0.15.7"
mongodb = "3.2.4"
openssl = "0.10.73"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.142"
sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-native-tls"] }

View File

@@ -4,17 +4,19 @@ use std::env;
use std::net::SocketAddr;
use std::str::FromStr;
#[cfg(feature = "no-auth")]
use tracing::error;
/// Runtime configuration loaded from environment variables by `from_env`.
#[derive(Debug)]
pub struct Config {
    // Socket address the HTTP server binds to (`BIND_ADDRESS`, default 127.0.0.1:3000).
    pub bind_address: SocketAddr,
    // pub database_url: Option<String>,
    // MongoDB connection string (`MONGODB_URI`, default mongodb://localhost:27017).
    pub mongodb_uri: String,
}
impl Config {
pub fn from_env() -> Result<Self, String> {
let bind_address_str =
env::var("BIND_ADDRESS").unwrap_or_else(|_| "127.0.0.1:3000".to_string());
let bind_address = SocketAddr::from_str(&bind_address_str)
.map_err(|e| format!("Invalid BIND_ADDRESS: {}", e))?;
@@ -24,11 +26,12 @@ impl Config {
return Err("In no-auth mode, BIND_ADDRESS must be 127.0.0.1".to_string());
}
// let database_url = env::var("DATABASE_URL").ok();
let mongodb_uri =
env::var("MONGODB_URI").unwrap_or_else(|_| "mongodb://localhost:27017".to_string());
Ok(Self {
bind_address,
// database_url,
mongodb_uri,
})
}
}

103
src/db/migrations.rs Normal file
View File

@@ -0,0 +1,103 @@
// src/db/migrations.rs
use mongodb::bson::doc;
use mongodb::{
Client, Database, IndexModel,
options::{ClientOptions, IndexOptions},
};
use tracing::info;
pub struct Migrator {
db: Database,
}
impl Migrator {
pub async fn new(uri: &str) -> Result<Self, mongodb::error::Error> {
let client_options = ClientOptions::parse(uri).await?;
let client = Client::with_options(client_options)?;
let db = client.database("employee_tracking");
Ok(Self { db })
}
pub async fn run(&self) -> Result<(), mongodb::error::Error> {
info!("Running migrations...");
// Users collection
self.db.create_collection("users").await?;
self.db
.collection::<bson::Document>("users")
.create_index(
IndexModel::builder()
.keys(doc! { "email": 1 })
.options(IndexOptions::builder().sparse(true).unique(true).build())
.build(),
)
.await?;
// Employees collection
self.db.create_collection("employees").await?;
self.db
.collection::<bson::Document>("employees")
.create_index(
IndexModel::builder()
.keys(doc! { "email": 1 })
.options(IndexOptions::builder().sparse(true).unique(true).build())
.build(),
)
.await?;
// Punches collection
self.db.create_collection("punches").await?;
self.db
.collection::<bson::Document>("punches")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "clock_out_at": 1 })
.options(
IndexOptions::builder()
.partial_filter_expression(doc! { "clock_out_at": null })
.unique(true)
.build(),
)
.build(),
)
.await?;
// Shifts collection
self.db.create_collection("shifts").await?;
self.db
.collection::<bson::Document>("shifts")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "start_at": 1, "end_at": 1 })
.build(),
)
.await?;
// Leave requests collection
self.db.create_collection("leave_requests").await?;
self.db
.collection::<bson::Document>("leave_requests")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "start_date": 1, "end_date": 1, "status": 1 })
.build(),
)
.await?;
// Inventory items collection (optional)
self.db.create_collection("inventory_items").await?;
self.db
.collection::<bson::Document>("inventory_items")
.create_index(
IndexModel::builder()
.keys(doc! { "sku": 1 })
.options(IndexOptions::builder().unique(true).build())
.build(),
)
.await?;
info!("Migrations completed.");
Ok(())
}
}

4
src/db/mod.rs Normal file
View File

@@ -0,0 +1,4 @@
// src/db/mod.rs
pub mod migrations;
pub mod seed;

138
src/db/seed.rs Normal file
View File

@@ -0,0 +1,138 @@
// src/db/seed.rs
use chrono::Utc;
use mongodb::{
Client, Database,
bson::{DateTime, doc, oid::ObjectId},
};
use tracing::info;
pub struct Seeder {
db: Database,
}
impl Seeder {
pub async fn new(uri: &str) -> Result<Self, mongodb::error::Error> {
let client = Client::with_uri_str(uri).await?;
let db = client.database("employee_tracking");
Ok(Self { db })
}
pub async fn run(&self) -> Result<(), mongodb::error::Error> {
info!("Seeding database...");
// Clear collections
self.db
.collection::<bson::Document>("users")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("employees")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("punches")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("shifts")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("leave_requests")
.delete_many(doc! {})
.await?;
// Seed users
let manager_id = ObjectId::new();
self.db
.collection("users")
.insert_one(doc! {
"_id": manager_id.clone(),
"role": "manager",
"email": "manager@example.com"
})
.await?;
// Seed employees
let emp1_id = ObjectId::new();
let emp2_id = ObjectId::new();
self.db
.collection("employees")
.insert_many(vec![
doc! {
"_id": emp1_id.clone(),
"full_name": "John Doe",
"email": "john.doe@example.com",
"position": "Developer",
"active": true,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
doc! {
"_id": emp2_id.clone(),
"full_name": "Jane Smith",
"email": "jane.smith@example.com",
"position": "Designer",
"active": true,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
])
.await?;
// Seed punches
self.db
.collection("punches")
.insert_one(doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"clock_in_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"clock_out_at": null,
"source": "web",
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
})
.await?;
// Seed shifts
self.db
.collection("shifts")
.insert_one(doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"start_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"end_at": DateTime::from_millis(Utc::now().timestamp_millis() + 8 * 3600 * 1000),
"created_by": manager_id.clone(),
"notes": "Morning shift",
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
})
.await?;
// Seed leave requests
self.db.collection("leave_requests").insert_many(vec![
doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"start_date": DateTime::from_millis(Utc::now().timestamp_millis() + 2 * 24 * 3600 * 1000),
"end_date": DateTime::from_millis(Utc::now().timestamp_millis() + 4 * 24 * 3600 * 1000),
"status": "approved",
"reason": "Vacation",
"reviewed_by": manager_id.clone(),
"reviewed_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
doc! {
"_id": ObjectId::new(),
"employee_id": emp2_id.clone(),
"start_date": DateTime::from_millis(Utc::now().timestamp_millis() + 5 * 24 * 3600 * 1000),
"end_date": DateTime::from_millis(Utc::now().timestamp_millis() + 6 * 24 * 3600 * 1000),
"status": "pending",
"reason": "Medical",
"reviewed_by": null,
"reviewed_at": null,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
]).await?;
info!("Seeding completed.");
Ok(())
}
}

View File

@@ -1,4 +1,5 @@
// src/handlers/mod.rs
pub mod health;
pub mod shift;
pub mod user;

View File

@@ -0,0 +1,3 @@
// src/handlers/shift/mod.rs
pub mod shift;

View File

@@ -0,0 +1,80 @@
// src/handlers/shift/shift.rs
use axum::{Extension, Json, http::StatusCode};
use chrono::Utc;
use mongodb::{
Database,
bson::{DateTime, doc},
};
use serde_json::json;
/// POST handler: creates a shift for an employee after checking that it does
/// not overlap an existing one.
///
/// Expects a JSON body with `employee_id` (hex ObjectId string), `start_at`
/// and `end_at` (epoch milliseconds), and an optional `notes` string.
/// Returns 201 on success, 400 on validation failure, 500 on DB errors.
pub async fn create_shift(
    Extension(db): Extension<Database>,
    Json(payload): Json<serde_json::Value>,
) -> impl axum::response::IntoResponse {
    let employee_id_str = payload
        .get("employee_id")
        .and_then(|v| v.as_str())
        .unwrap_or("");
    // Store the id as an ObjectId, matching the seeded `employees` and
    // `shifts` documents; the old code stored a raw string, so the overlap
    // query could never match seeded data.
    let employee_id = match bson::oid::ObjectId::parse_str(employee_id_str) {
        Ok(id) => id,
        Err(_) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(json!({
                    "message": "Invalid employee_id",
                    "success": false,
                    "error": true
                })),
            );
        }
    };
    let start_at = payload
        .get("start_at")
        .and_then(|v| v.as_i64())
        .unwrap_or(0);
    let end_at = payload.get("end_at").and_then(|v| v.as_i64()).unwrap_or(0);
    if end_at <= start_at {
        return (
            StatusCode::BAD_REQUEST,
            Json(json!({
                "message": "end_at must be after start_at",
                "success": false,
                "error": true
            })),
        );
    }
    let shifts = db.collection::<bson::Document>("shifts");
    // Two intervals overlap iff existing.start < new.end AND existing.end >
    // new.start. The previous `$or` of one-sided comparisons matched almost
    // every shift, rejecting valid requests.
    let overlap = match shifts
        .find_one(doc! {
            "employee_id": employee_id,
            "start_at": { "$lt": DateTime::from_millis(end_at) },
            "end_at": { "$gt": DateTime::from_millis(start_at) },
        })
        .await
    {
        Ok(found) => found,
        // Previously `.unwrap()` — a DB error would panic the handler task.
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(json!({
                    "message": format!("Failed to check for overlapping shifts: {}", e),
                    "success": false,
                    "error": true
                })),
            );
        }
    };
    if overlap.is_some() {
        return (
            StatusCode::BAD_REQUEST,
            Json(json!({
                "message": "Shift overlaps with existing shift",
                "success": false,
                "error": true
            })),
        );
    }
    // Insert shift
    let result = shifts
        .insert_one(doc! {
            "_id": bson::oid::ObjectId::new(),
            "employee_id": employee_id,
            "start_at": DateTime::from_millis(start_at),
            "end_at": DateTime::from_millis(end_at),
            // NOTE(review): no authenticated user available here yet — left null.
            "created_by": null,
            "notes": payload.get("notes").and_then(|v| v.as_str()).unwrap_or(""),
            "created_at": DateTime::from_millis(Utc::now().timestamp_millis())
        })
        .await;
    match result {
        Ok(_) => (
            StatusCode::CREATED,
            Json(json!({
                "message": "Shift created successfully",
                "success": true,
                "error": false
            })),
        ),
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(json!({
                "message": format!("Failed to create shift: {}", e),
                "success": false,
                "error": true
            })),
        ),
    }
}

View File

@@ -3,17 +3,31 @@
use std::process::exit;
use axum::Router;
use clap::{Parser, Subcommand};
use dotenvy::dotenv;
use mongodb::Client;
use tokio::signal;
use tower_http::trace::TraceLayer;
use tracing::{error, info};
use tracing_subscriber::{EnvFilter, fmt, prelude::*};
mod config;
use crate::config::Config;
mod db; // Updated to import db module instead of migrations and seed
mod handlers;
mod routes;
use config::Config;
// Command-line interface: an optional maintenance subcommand. With no
// subcommand the server starts normally. Plain `//` comments on purpose —
// `///` doc comments on clap derives become help text and would change
// the CLI's output.
#[derive(Parser)]
struct Cli {
    #[command(subcommand)]
    command: Option<Commands>,
}
// Maintenance subcommands, handled in main() before the server starts:
// `migrate` creates collections/indexes, `seed` inserts sample data.
#[derive(Subcommand)]
enum Commands {
    Migrate,
    Seed,
}
#[tokio::main]
async fn main() {
@@ -35,6 +49,50 @@ async fn main() {
}
};
// Initialize MongoDB client
let client = match Client::with_uri_str(&config.mongodb_uri).await {
Ok(client) => client,
Err(e) => {
error!("Failed to initialize MongoDB client: {}", e);
exit(1);
}
};
let db = client.database("employee_tracking");
// Handle CLI commands
let cli = Cli::parse();
match cli.command {
Some(Commands::Migrate) => {
let migrator = match db::migrations::Migrator::new(&config.mongodb_uri).await {
Ok(m) => m,
Err(e) => {
error!("Failed to initialize migrator: {}", e);
exit(1);
}
};
migrator.run().await.unwrap_or_else(|e| {
error!("Failed to run migrations: {}", e);
exit(1);
});
return;
}
Some(Commands::Seed) => {
let seeder = match db::seed::Seeder::new(&config.mongodb_uri).await {
Ok(s) => s,
Err(e) => {
error!("Failed to initialize seeder: {}", e);
exit(1);
}
};
seeder.run().await.unwrap_or_else(|e| {
error!("Failed to run seed: {}", e);
exit(1);
});
return;
}
None => {}
}
#[cfg(feature = "no-auth")]
info!("NO-AUTH MODE ENABLED");
@@ -42,9 +100,10 @@ async fn main() {
// Build the Axum router
let app = Router::new()
// .nest("/health", routes::health::health::health_routes())
.nest("/health", routes::health::health::health_routes())
.nest("/user", routes::user::user::user_routes())
.nest("/shift", routes::shift::shift::shift_routes())
.layer(axum::Extension(db)) // Pass MongoDB database to handlers
.layer(TraceLayer::new_for_http());
// Run the server

View File

@@ -1,4 +1,5 @@
// src/routes/mod.rs
pub mod health;
pub mod shift;
pub mod user;

3
src/routes/shift/mod.rs Normal file
View File

@@ -0,0 +1,3 @@
// src/routes/shift/mod.rs
pub mod shift;

View File

@@ -0,0 +1,9 @@
// src/routes/shift/shift.rs
use axum::{Router, routing::post};
use crate::handlers::shift::shift::create_shift;
pub fn shift_routes() -> Router {
Router::new().route("/", post(create_shift))
}