1 Commit

899cd073c0: Add MongoDB support and CLI for migrations and seed (2025-08-19 16:50:19 -04:00)
28 changed files with 1450 additions and 804 deletions

.env (4 lines changed)

@@ -1,5 +1,5 @@
# .env.example (copy to .env for local use)
// .env.example (copy to .env for local use)
RUST_LOG=info
BIND_ADDRESS=127.0.0.1:3000
MONGO_URI=mongodb://mongodb:27017
MONGO_URI=mongodb://localhost:27017

Cargo.lock (generated, 1097 lines changed)
File diff suppressed because it is too large.

Cargo.toml

@@ -1,20 +1,24 @@
[package]
name = "purenotify_backend"
name = "employee-tracking-backend"
version = "0.1.0"
edition = "2024"
[dependencies]
axum = "0.8.4"
base64 = "0.22.1"
bson = { version = "2.15.0", features = ["chrono-0_4"] }
chrono = { version = "0.4.41", features = ["serde"] }
sha2 = "0.10.9"
rand = "0.9.2"
regex = "1.11.1"
clap = { version = "4.5.45", features = ["derive"] }
dotenvy = "0.15.7"
mongodb = "3.2.4"
openssl = "0.10.73"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.143"
serde_json = "1.0.142"
sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-native-tls"] }
tokio = { version = "1.47.1", features = ["full", "rt-multi-thread", "signal"] }
tower-http = { version = "0.6.6", features = ["trace"] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
[features]
no-auth = []

Dockerfile (deleted)

@@ -1,51 +0,0 @@
# Use the official Rust image as a base
FROM rust:bookworm as builder
# Set the working directory
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
# Add any other dependencies required by your project
# For example, if you use postgres/mysql/sqlite, you might need libpq-dev, libmysqlclient-dev, libsqlite3-dev
&& rm -rf /var/lib/apt/lists/*
# Copy the Cargo.toml and Cargo.lock first to leverage Docker cache
# This layer only rebuilds if dependencies change
COPY Cargo.toml Cargo.lock ./
# Create a dummy src directory and main.rs to build dependencies
# This caches the dependency build
RUN mkdir -p src && echo "fn main() {println!(\"hello world\");}" > src/main.rs
# Build dependencies
RUN cargo build --release && rm -rf src
# Copy the actual source code
COPY . .
# Build the release binary
RUN cargo build --release
# --- Start a new stage for a smaller final image ---
FROM debian:bookworm-slim
# Set the working directory
WORKDIR /app
# Install runtime dependencies if any
# For example, if your Rust application dynamically links to OpenSSL, you might need libssl3
RUN apt-get update && apt-get install -y \
libssl3 \
# Add any other runtime dependencies here
&& rm -rf /var/lib/apt/lists/*
# Copy the built binary from the builder stage
COPY --from=builder /app/target/release/employee-tracking-backend .
# Expose the port your application listens on
EXPOSE 3000
# Set the entrypoint command to run your application
CMD ["./employee-tracking-backend"]


@@ -55,34 +55,12 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
cargo build
```
4. **Run the application database with Docker (Recommended):**
If you have Docker installed
5. **Run the application (Manual with Docker):**
First, start the MongoDB container:
```sh
docker run \
--name mongodb \
-p 27017:27017 \
-e MONGO_INITDB_ROOT_USERNAME=admin \
-e MONGO_INITDB_ROOT_PASSWORD=password123 \
-e MONGO_INITDB_DATABASE=purenotify \
-v mongodb_data:/data/db \
mongo:latest
```
(Note: The `purenotify` database name here should match `DATABASE_NAME` in your `.env` or `config.rs` for the backend to connect correctly. The `MONGODB_URI` for the backend would be `mongodb://127.0.0.1:27017` or `mongodb://localhost:27017`.)
Then, run the Rust application:
4. **Run the application:**
For development, you can run the project directly with `cargo run`:
```sh
cargo run
```
For a release build, run:
```sh
cargo run --release
```
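Assuming the clap-based CLI added in this commit, the same binary also accepts `migrate` and `seed` subcommands, so a local database can be prepared with `cargo run -- migrate` followed by `cargo run -- seed` before starting the server with a plain `cargo run`.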


@@ -1,38 +0,0 @@
version: "3.8"
services:
purenotify_backend:
build:
context: .
dockerfile: Dockerfile
ports:
- "3000:3000"
environment:
# These should match the defaults or your specific configuration in config.rs
BIND_ADDRESS: "0.0.0.0:3000"
MONGODB_URI: "mongodb://mongodb:27017"
DATABASE_NAME: "purenotify"
RUST_LOG: "info,tower_http=debug,mongodb=debug"
depends_on:
- mongodb
# Optional: If you want to enable the no-auth feature for local development
# command: cargo run --features "no-auth"
mongodb:
image: mongo:6.0
ports:
- "27017:27017"
volumes:
- mongodb_data:/data/db
environment:
MONGO_INITDB_ROOT_USERNAME: admin
MONGO_INITDB_ROOT_PASSWORD: password123
MONGO_INITDB_DATABASE: purenotify
# Optional: MongoDB authentication (highly recommended for production)
# MONGO_INITDB_ROOT_USERNAME: your_mongo_username
# MONGO_INITDB_ROOT_PASSWORD: your_mongo_password
# MONGODB_REPLICA_SET_NAME: rs0 # Uncomment for replica set
volumes:
mongodb_data:


@@ -4,23 +4,23 @@ use std::env;
use std::net::SocketAddr;
use std::str::FromStr;
#[cfg(feature = "no-auth")]
use tracing::error;
#[derive(Debug, Clone)]
#[derive(Debug)]
pub struct Config {
pub bind_address: SocketAddr,
pub mongodb_uri: String,
pub database_name: String,
}
impl Config {
pub fn from_env() -> Result<Self, String> {
let bind_address_str =
env::var("BIND_ADDRESS").unwrap_or_else(|_| "127.0.0.1:3000".to_string());
let bind_address = SocketAddr::from_str(&bind_address_str)
.map_err(|e| format!("Invalid BIND_ADDRESS: {}", e))?;
#[cfg(feature = "no-auth")]
if bind_address.ip() != std::net::IpAddr::from([127, 0, 0, 1]) {
error!("In no-auth mode, BIND_ADDRESS must be 127.0.0.1");
return Err("In no-auth mode, BIND_ADDRESS must be 127.0.0.1".to_string());
@@ -29,12 +29,9 @@ impl Config {
let mongodb_uri =
env::var("MONGODB_URI").unwrap_or_else(|_| "mongodb://localhost:27017".to_string());
let database_name = env::var("DATABASE_NAME").unwrap_or_else(|_| "purenotify".to_string());
Ok(Self {
bind_address,
mongodb_uri,
database_name,
})
}
}

src/db/migrations.rs (new file, 103 lines)

@@ -0,0 +1,103 @@
// src/db/migrations.rs
use mongodb::bson::doc;
use mongodb::{
Client, Database, IndexModel,
options::{ClientOptions, IndexOptions},
};
use tracing::info;
pub struct Migrator {
db: Database,
}
impl Migrator {
pub async fn new(uri: &str) -> Result<Self, mongodb::error::Error> {
let client_options = ClientOptions::parse(uri).await?;
let client = Client::with_options(client_options)?;
let db = client.database("employee_tracking");
Ok(Self { db })
}
pub async fn run(&self) -> Result<(), mongodb::error::Error> {
info!("Running migrations...");
// Users collection
self.db.create_collection("users").await?;
self.db
.collection::<bson::Document>("users")
.create_index(
IndexModel::builder()
.keys(doc! { "email": 1 })
.options(IndexOptions::builder().sparse(true).unique(true).build())
.build(),
)
.await?;
// Employees collection
self.db.create_collection("employees").await?;
self.db
.collection::<bson::Document>("employees")
.create_index(
IndexModel::builder()
.keys(doc! { "email": 1 })
.options(IndexOptions::builder().sparse(true).unique(true).build())
.build(),
)
.await?;
// Punches collection
self.db.create_collection("punches").await?;
self.db
.collection::<bson::Document>("punches")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "clock_out_at": 1 })
.options(
IndexOptions::builder()
.partial_filter_expression(doc! { "clock_out_at": null })
.unique(true)
.build(),
)
.build(),
)
.await?;
// Shifts collection
self.db.create_collection("shifts").await?;
self.db
.collection::<bson::Document>("shifts")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "start_at": 1, "end_at": 1 })
.build(),
)
.await?;
// Leave requests collection
self.db.create_collection("leave_requests").await?;
self.db
.collection::<bson::Document>("leave_requests")
.create_index(
IndexModel::builder()
.keys(doc! { "employee_id": 1, "start_date": 1, "end_date": 1, "status": 1 })
.build(),
)
.await?;
// Inventory items collection (optional)
self.db.create_collection("inventory_items").await?;
self.db
.collection::<bson::Document>("inventory_items")
.create_index(
IndexModel::builder()
.keys(doc! { "sku": 1 })
.options(IndexOptions::builder().unique(true).build())
.build(),
)
.await?;
info!("Migrations completed.");
Ok(())
}
}
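The unique partial index on `punches` (filtered to `clock_out_at: null`) appears intended to allow at most one open punch per employee. A minimal sketch, not part of the commit, for checking that the migrations ran, assuming a reachable MongoDB at `MONGODB_URI` (falling back to localhost) and the hard-coded `employee_tracking` database used above:

```rust
// Sketch only: list the indexes created by `cargo run -- migrate`.
use mongodb::{Client, bson::Document};

#[tokio::main]
async fn main() -> mongodb::error::Result<()> {
    let uri = std::env::var("MONGODB_URI")
        .unwrap_or_else(|_| "mongodb://localhost:27017".to_string());
    let db = Client::with_uri_str(&uri).await?.database("employee_tracking");

    for name in ["users", "employees", "punches", "shifts", "leave_requests", "inventory_items"] {
        // Includes the default `_id_` index plus the ones built by the Migrator.
        let indexes = db.collection::<Document>(name).list_index_names().await?;
        println!("{name}: {indexes:?}");
    }
    Ok(())
}
```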

src/db/mod.rs (new file, 4 lines)

@@ -0,0 +1,4 @@
// src/db/mod.rs
pub mod migrations;
pub mod seed;

src/db/seed.rs (new file, 138 lines)

@@ -0,0 +1,138 @@
// src/db/seed.rs
use chrono::Utc;
use mongodb::{
Client, Database,
bson::{DateTime, doc, oid::ObjectId},
};
use tracing::info;
pub struct Seeder {
db: Database,
}
impl Seeder {
pub async fn new(uri: &str) -> Result<Self, mongodb::error::Error> {
let client = Client::with_uri_str(uri).await?;
let db = client.database("employee_tracking");
Ok(Self { db })
}
pub async fn run(&self) -> Result<(), mongodb::error::Error> {
info!("Seeding database...");
// Clear collections
self.db
.collection::<bson::Document>("users")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("employees")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("punches")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("shifts")
.delete_many(doc! {})
.await?;
self.db
.collection::<bson::Document>("leave_requests")
.delete_many(doc! {})
.await?;
// Seed users
let manager_id = ObjectId::new();
self.db
.collection("users")
.insert_one(doc! {
"_id": manager_id.clone(),
"role": "manager",
"email": "manager@example.com"
})
.await?;
// Seed employees
let emp1_id = ObjectId::new();
let emp2_id = ObjectId::new();
self.db
.collection("employees")
.insert_many(vec![
doc! {
"_id": emp1_id.clone(),
"full_name": "John Doe",
"email": "john.doe@example.com",
"position": "Developer",
"active": true,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
doc! {
"_id": emp2_id.clone(),
"full_name": "Jane Smith",
"email": "jane.smith@example.com",
"position": "Designer",
"active": true,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
])
.await?;
// Seed punches
self.db
.collection("punches")
.insert_one(doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"clock_in_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"clock_out_at": null,
"source": "web",
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
})
.await?;
// Seed shifts
self.db
.collection("shifts")
.insert_one(doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"start_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"end_at": DateTime::from_millis(Utc::now().timestamp_millis() + 8 * 3600 * 1000),
"created_by": manager_id.clone(),
"notes": "Morning shift",
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
})
.await?;
// Seed leave requests
self.db.collection("leave_requests").insert_many(vec![
doc! {
"_id": ObjectId::new(),
"employee_id": emp1_id.clone(),
"start_date": DateTime::from_millis(Utc::now().timestamp_millis() + 2 * 24 * 3600 * 1000),
"end_date": DateTime::from_millis(Utc::now().timestamp_millis() + 4 * 24 * 3600 * 1000),
"status": "approved",
"reason": "Vacation",
"reviewed_by": manager_id.clone(),
"reviewed_at": DateTime::from_millis(Utc::now().timestamp_millis()),
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
doc! {
"_id": ObjectId::new(),
"employee_id": emp2_id.clone(),
"start_date": DateTime::from_millis(Utc::now().timestamp_millis() + 5 * 24 * 3600 * 1000),
"end_date": DateTime::from_millis(Utc::now().timestamp_millis() + 6 * 24 * 3600 * 1000),
"status": "pending",
"reason": "Medical",
"reviewed_by": null,
"reviewed_at": null,
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
},
]).await?;
info!("Seeding completed.");
Ok(())
}
}
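Because the seeder starts by running `delete_many(doc! {})` on every collection it touches, `cargo run -- seed` can be re-run at any time to reset local data; it is presumably intended for development only, not for production databases.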


@@ -1,4 +1,5 @@
// src/handlers/mod.rs
pub mod health;
pub mod shift;
pub mod user;


@@ -0,0 +1,3 @@
// src/handlers/shift/mod.rs
pub mod shift;


@@ -0,0 +1,80 @@
// src/handlers/shift/shift.rs
use axum::{Extension, Json, http::StatusCode};
use chrono::Utc;
use mongodb::{
Database,
bson::{DateTime, doc},
};
use serde_json::json;
pub async fn create_shift(
Extension(db): Extension<Database>,
Json(payload): Json<serde_json::Value>,
) -> impl axum::response::IntoResponse {
let employee_id = payload
.get("employee_id")
.and_then(|v| v.as_str())
.unwrap_or("");
let start_at = payload
.get("start_at")
.and_then(|v| v.as_i64())
.unwrap_or(0);
let end_at = payload.get("end_at").and_then(|v| v.as_i64()).unwrap_or(0);
// Validate no overlapping shifts
let shifts = db.collection::<bson::Document>("shifts");
let overlap = shifts
.find_one(doc! {
"employee_id": employee_id,
"$or": [
{ "start_at": { "$lte": DateTime::from_millis(end_at) } },
{ "end_at": { "$gte": DateTime::from_millis(start_at) } },
]
})
.await
.unwrap();
if overlap.is_some() {
return (
StatusCode::BAD_REQUEST,
Json(json!({
"message": "Shift overlaps with existing shift",
"success": false,
"error": true
})),
);
}
// Insert shift
let result = shifts
.insert_one(doc! {
"_id": bson::oid::ObjectId::new(),
"employee_id": employee_id,
"start_at": DateTime::from_millis(start_at),
"end_at": DateTime::from_millis(end_at),
"created_by": null,
"notes": payload.get("notes").and_then(|v| v.as_str()).unwrap_or(""),
"created_at": DateTime::from_millis(Utc::now().timestamp_millis())
})
.await;
match result {
Ok(_) => (
StatusCode::CREATED,
Json(json!({
"message": "Shift created successfully",
"success": true,
"error": false
})),
),
Err(e) => (
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({
"message": format!("Failed to create shift: {}", e),
"success": false,
"error": true
})),
),
}
}
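Two things to flag in the handler above: the `$or` filter matches any existing shift that merely starts before the requested end or ends after the requested start, which is true of essentially every shift, and the seeder stores `employee_id` as an `ObjectId` while this handler writes and queries it as a plain string. If the intent is the usual interval-overlap test, the two bounds would need to be combined (implicit AND) rather than `$or`-ed. A sketch of such a filter, keeping the millisecond-timestamp convention used above (the helper name is hypothetical, not the committed code):

```rust
// Sketch: an existing shift overlaps the requested [start_at, end_at) window
// only if it starts before the new end AND ends after the new start.
use mongodb::bson::{DateTime, Document, doc};

fn overlap_filter(employee_id: &str, start_at_ms: i64, end_at_ms: i64) -> Document {
    doc! {
        "employee_id": employee_id,
        // Both conditions in one document form an implicit AND.
        "start_at": { "$lt": DateTime::from_millis(end_at_ms) },
        "end_at":   { "$gt": DateTime::from_millis(start_at_ms) },
    }
}
```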


@@ -1,4 +1,3 @@
// src/handlers/user/mod.rs
pub mod register;
pub mod user;


@@ -1,40 +0,0 @@
// src/handlers/register/register.rs
use axum::Json;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use serde::Deserialize;
use serde_json::json;
#[derive(Debug, Deserialize)]
pub struct RegisterPayload {
pub email: String,
pub password: String,
}
impl RegisterPayload {
pub fn new(email: String, password: String) -> Self {
RegisterPayload { email, password }
}
}
pub async fn register(Json(_payload): Json<RegisterPayload>) -> impl IntoResponse {
// TODO: Implement user registration logic using the user repository in ./src/mongodb/repositories/user
(
StatusCode::OK,
Json(json!(
{
"message": "new user registered",
"data": {
"user": {
"email" : _payload.email,
"password": _payload.password,
}
},
"success": true,
"error": false,
}
)),
)
}


@@ -1,111 +1,112 @@
// src/main.rs
use std::{process::exit, sync::Arc};
use std::process::exit;
use axum::Router;
use clap::{Parser, Subcommand};
use dotenvy::dotenv;
use mongodb::Client;
use tokio::signal;
use tower_http::trace::TraceLayer;
use tracing::{error, info};
use tracing_subscriber::{EnvFilter, fmt, prelude::*};
mod config;
use crate::config::Config;
mod db; // Updated to import db module instead of migrations and seed
mod handlers;
mod mongo; // local module wrapping the Mongo client
mod routes;
use ::mongodb::Database; // external crate (absolute path avoids name clash)
use config::Config;
use mongo::MongoDb; // your wrapper
#[derive(Parser)]
struct Cli {
#[command(subcommand)]
command: Option<Commands>,
}
// Shared application state for online mode
pub struct AppState {
pub db: Database,
pub config: Config,
#[derive(Subcommand)]
enum Commands {
Migrate,
Seed,
}
#[tokio::main]
async fn main() {
// Load .env early
// Load environment variables from .env file
dotenv().ok();
// Tracing with a safe fallback if RUST_LOG is unset
let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
// Initialize tracing
tracing_subscriber::registry()
.with(fmt::layer())
.with(env_filter)
.with(EnvFilter::from_env("RUST_LOG"))
.init();
// Load config
let config = match Config::from_env() {
Ok(c) => c,
Err(e) => {
error!("Failed to load config: {e}");
error!("Failed to load config: {}", e);
exit(1);
}
};
// Runtime OFFLINE switch: true if OFFLINE is 1/true/yes/on (case-insensitive)
let offline = std::env::var("OFFLINE")
.ok()
.map(|v| matches!(v.to_ascii_lowercase().as_str(), "1" | "true" | "yes" | "on"))
.unwrap_or(false);
if offline {
// Enforce loopback binding while offline
if !config.bind_address.ip().is_loopback() {
error!(
"OFFLINE=true requires binding to a loopback address (e.g., 127.0.0.1:<port> or [::1]:<port>), got {}",
config.bind_address
);
// Initialize MongoDB client
let client = match Client::with_uri_str(&config.mongodb_uri).await {
Ok(client) => client,
Err(e) => {
error!("Failed to initialize MongoDB client: {}", e);
exit(1);
}
};
let db = client.database("employee_tracking");
info!("OFFLINE mode enabled — not connecting to MongoDB");
info!("Server starting on {}", config.bind_address);
// Health-only, no state. Subrouter is typed to `()`.
let app = Router::new()
.nest("/health", routes::health::health::health_routes::<()>())
.layer(TraceLayer::new_for_http());
let listener = tokio::net::TcpListener::bind(config.bind_address)
.await
.unwrap();
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await
.unwrap();
return;
// Handle CLI commands
let cli = Cli::parse();
match cli.command {
Some(Commands::Migrate) => {
let migrator = match db::migrations::Migrator::new(&config.mongodb_uri).await {
Ok(m) => m,
Err(e) => {
error!("Failed to initialize migrator: {}", e);
exit(1);
}
};
migrator.run().await.unwrap_or_else(|e| {
error!("Failed to run migrations: {}", e);
exit(1);
});
return;
}
Some(Commands::Seed) => {
let seeder = match db::seed::Seeder::new(&config.mongodb_uri).await {
Ok(s) => s,
Err(e) => {
error!("Failed to initialize seeder: {}", e);
exit(1);
}
};
seeder.run().await.unwrap_or_else(|e| {
error!("Failed to run seed: {}", e);
exit(1);
});
return;
}
None => {}
}
// --- Online (DB-enabled) path ---
let mongo = match MongoDb::connect(&config).await {
Ok(db) => db,
Err(e) => {
error!("Failed to connect to MongoDB: {e}");
exit(1);
}
};
let shared_state = Arc::new(AppState {
db: mongo.database,
config: config.clone(),
});
#[cfg(feature = "no-auth")]
info!("NO-AUTH MODE ENABLED");
info!("Server starting on {}", config.bind_address);
// Build subrouters typed with the same state as the root
let health_router = routes::health::health::health_routes::<Arc<AppState>>();
let user_router = routes::user::user::user_routes::<Arc<AppState>>();
// Root router typed with state; set state once on the root
let app = Router::<Arc<AppState>>::new()
.nest("/health", health_router)
.nest("/user", user_router)
.with_state(shared_state)
// Build the Axum router
let app = Router::new()
.nest("/health", routes::health::health::health_routes())
.nest("/user", routes::user::user::user_routes())
.nest("/shift", routes::shift::shift::shift_routes())
.layer(axum::Extension(db)) // Pass MongoDB database to handlers
.layer(TraceLayer::new_for_http());
// Run the server
let listener = tokio::net::TcpListener::bind(config.bind_address)
.await
.unwrap();
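With this dispatch in place, `cargo run -- migrate` and `cargo run -- seed` perform their one-off task and exit, while `cargo run` with no subcommand falls through to the HTTP server path, where the `employee_tracking` database handle is attached via `axum::Extension(db)` and extracted in handlers such as `create_shift`.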


@@ -1,6 +0,0 @@
// src/mongodb/mod.rs
pub mod models;
pub mod mongodb;
pub use mongodb::MongoDb;


@@ -1,5 +0,0 @@
// src/mongodb/models/mod.rs
pub mod user;
// Re-exports can be added here when needed


@@ -1,98 +0,0 @@
// models/user.rs
use bson::oid::ObjectId;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct User {
#[serde(rename = "_id", skip_serializing_if = "Option::is_none")]
pub id: Option<ObjectId>,
pub username: String,
pub email: String,
pub first_name: String,
pub last_name: String,
pub age: u32,
pub is_active: bool,
pub phone_number: String,
pub password: String,
pub salt: String,
#[serde(default = "chrono::Utc::now")]
pub created_at: DateTime<Utc>,
#[serde(default = "chrono::Utc::now")]
pub updated_at: DateTime<Utc>,
pub last_login: Option<DateTime<Utc>>,
pub role: String,
pub profile: Option<Profile>,
pub preferences: Option<Preferences>,
pub stats: Option<Stats>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Profile {
pub avatar_url: String,
pub bio: String,
pub location: String,
pub website: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Preferences {
pub theme: String,
pub language: String,
pub notifications_enabled: bool,
pub email_verified: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Stats {
pub total_posts: u32,
pub total_comments: u32,
pub total_likes: u32,
pub account_age_days: u32,
}
impl User {
pub fn new(username: String, email: String, password: String, salt: String) -> Self {
let now = chrono::Utc::now();
Self {
id: None,
username,
email,
first_name: String::new(),
last_name: String::new(),
age: 0,
is_active: true,
phone_number: String::new(),
password,
salt,
created_at: now,
updated_at: now,
last_login: None,
role: "user".to_string(),
profile: None,
preferences: None,
stats: None,
}
}
pub fn with_profile(mut self, profile: Profile) -> Self {
self.profile = Some(profile);
self
}
pub fn with_preferences(mut self, preferences: Preferences) -> Self {
self.preferences = Some(preferences);
self
}
pub fn with_stats(mut self, stats: Stats) -> Self {
self.stats = Some(stats);
self
}
pub fn update_last_login(&mut self) {
self.last_login = Some(chrono::Utc::now());
self.updated_at = chrono::Utc::now();
}
}


@@ -1,38 +0,0 @@
use crate::config::Config;
use mongodb::options::{ClientOptions, ServerApi, ServerApiVersion};
use mongodb::{Client, Database};
pub struct MongoDb {
// pub client: Client,
pub database: Database,
}
impl MongoDb {
pub async fn connect(config: &Config) -> Result<Self, mongodb::error::Error> {
// Parse connection string from config
let mut client_options = ClientOptions::parse(&config.mongodb_uri).await?;
// Set the server API version (optional but recommended for MongoDB Atlas)
let server_api = ServerApi::builder().version(ServerApiVersion::V1).build();
client_options.server_api = Some(server_api);
// Optional: Set additional options
client_options.app_name = Some("PureNotify".to_string());
// Create client
let client = Client::with_options(client_options)?;
// Ping the server to verify connection
client
.database("admin")
.run_command(mongodb::bson::doc! {"ping": 1})
.await?;
println!("✅ Successfully connected to MongoDB!");
// Get database handle using the database_name from config
let database = client.database(&config.database_name);
Ok(MongoDb { database })
}
}


@@ -1,191 +0,0 @@
use async_trait::async_trait;
use bson::oid::ObjectId;
use futures::TryStreamExt;
use mongodb::Collection;
use mongodb::bson::doc;
use mongodb::options::FindOptions;
use super::user_repository::{UserError, UserRepository};
use crate::models::user::User;
pub struct MongoUserRepository {
collection: Collection<User>,
}
impl MongoUserRepository {
pub fn new(collection: Collection<User>) -> Self {
Self { collection }
}
}
#[async_trait]
impl UserRepository for MongoUserRepository {
async fn create(&self, mut user: User) -> Result<User, UserError> {
// Validate required fields
if user.username.is_empty() {
return Err(UserError::ValidationError(
"Username is required".to_string(),
));
}
if user.email.is_empty() {
return Err(UserError::ValidationError("Email is required".to_string()));
}
// Check for existing users
if self.exists_by_username(user.username.clone()).await? {
return Err(UserError::DuplicateKey("username".to_string()));
}
if self.exists_by_email(user.email.clone()).await? {
return Err(UserError::DuplicateKey("email".to_string()));
}
// Set timestamps
let now = chrono::Utc::now();
user.created_at = now;
user.updated_at = now;
user.id = None; // Let MongoDB generate the ID
let result = self.collection.insert_one(&user, None).await?;
// Return the created user with the new ID
user.id = result.inserted_id.as_object_id();
Ok(user)
}
async fn get(&self, id: ObjectId) -> Result<User, UserError> {
let user = self.collection.find_one(doc! {"_id": id}, None).await?;
user.ok_or(UserError::NotFound)
}
async fn update(&self, id: ObjectId, mut user: User) -> Result<User, UserError> {
// Update the timestamp
user.updated_at = chrono::Utc::now();
user.id = Some(id);
let result = self
.collection
.replace_one(doc! {"_id": id}, &user, None)
.await?;
if result.matched_count == 0 {
return Err(UserError::NotFound);
}
Ok(user)
}
async fn delete(&self, id: ObjectId) -> Result<(), UserError> {
let result = self.collection.delete_one(doc! {"_id": id}, None).await?;
if result.deleted_count == 0 {
return Err(UserError::NotFound);
}
Ok(())
}
async fn list(&self, limit: Option<i64>, skip: Option<u64>) -> Result<Vec<User>, UserError> {
let find_options = FindOptions::builder().limit(limit).skip(skip).build();
let cursor = self.collection.find(None, find_options).await?;
let users: Vec<User> = cursor.try_collect().await?;
Ok(users)
}
async fn search(&self, query: String) -> Result<Vec<User>, UserError> {
// Use regex for partial matching or text search
let filter = doc! {
"$or": [
{"username": {"$regex": &query, "$options": "i"}},
{"email": {"$regex": &query, "$options": "i"}},
{"first_name": {"$regex": &query, "$options": "i"}},
{"last_name": {"$regex": &query, "$options": "i"}}
]
};
let cursor = self.collection.find(filter, None).await?;
let users: Vec<User> = cursor.try_collect().await?;
Ok(users)
}
async fn count(&self) -> Result<u64, UserError> {
let count = self.collection.count_documents(None, None).await?;
Ok(count)
}
async fn count_by_name(&self, name: String) -> Result<u64, UserError> {
let filter = doc! {
"$or": [
{"first_name": &name},
{"last_name": &name}
]
};
let count = self.collection.count_documents(filter, None).await?;
Ok(count)
}
async fn count_by_email(&self, email: String) -> Result<u64, UserError> {
let count = self
.collection
.count_documents(doc! {"email": email}, None)
.await?;
Ok(count)
}
async fn count_by_phone(&self, phone: String) -> Result<u64, UserError> {
let count = self
.collection
.count_documents(doc! {"phone_number": phone}, None)
.await?;
Ok(count)
}
async fn count_by_id(&self, id: ObjectId) -> Result<u64, UserError> {
let count = self
.collection
.count_documents(doc! {"_id": id}, None)
.await?;
Ok(count)
}
async fn find_by_email(&self, email: String) -> Result<Option<User>, UserError> {
let user = self
.collection
.find_one(doc! {"email": email}, None)
.await?;
Ok(user)
}
async fn find_by_username(&self, username: String) -> Result<Option<User>, UserError> {
let user = self
.collection
.find_one(doc! {"username": username}, None)
.await?;
Ok(user)
}
async fn exists_by_email(&self, email: String) -> Result<bool, UserError> {
let count = self.count_by_email(email).await?;
Ok(count > 0)
}
async fn exists_by_username(&self, username: String) -> Result<bool, UserError> {
let count = self
.collection
.count_documents(doc! {"username": username}, None)
.await?;
Ok(count > 0)
}
async fn get_active_users(&self) -> Result<Vec<User>, UserError> {
let cursor = self.collection.find(doc! {"is_active": true}, None).await?;
let users: Vec<User> = cursor.try_collect().await?;
Ok(users)
}
async fn get_users_by_role(&self, role: String) -> Result<Vec<User>, UserError> {
let cursor = self.collection.find(doc! {"role": role}, None).await?;
let users: Vec<User> = cursor.try_collect().await?;
Ok(users)
}
}


@@ -1,53 +0,0 @@
use async_trait::async_trait;
use bson::oid::ObjectId;
use crate::models::user::User;
// Define custom error type
#[derive(Debug)]
pub enum UserError {
MongoError(mongodb::error::Error),
NotFound,
ValidationError(String),
DuplicateKey(String),
}
impl std::fmt::Display for UserError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
UserError::MongoError(e) => write!(f, "MongoDB error: {}", e),
UserError::NotFound => write!(f, "User not found"),
UserError::ValidationError(msg) => write!(f, "Validation error: {}", msg),
UserError::DuplicateKey(field) => write!(f, "Duplicate key error: {}", field),
}
}
}
impl std::error::Error for UserError {}
impl From<mongodb::error::Error> for UserError {
fn from(error: mongodb::error::Error) -> Self {
UserError::MongoError(error)
}
}
// Repository trait
#[async_trait]
pub trait UserRepository {
async fn create(&self, user: User) -> Result<User, UserError>;
async fn get(&self, id: ObjectId) -> Result<User, UserError>;
async fn update(&self, id: ObjectId, user: User) -> Result<User, UserError>;
async fn delete(&self, id: ObjectId) -> Result<(), UserError>;
async fn list(&self, limit: Option<i64>, skip: Option<u64>) -> Result<Vec<User>, UserError>;
async fn search(&self, query: String) -> Result<Vec<User>, UserError>;
async fn count(&self) -> Result<u64, UserError>;
async fn count_by_name(&self, name: String) -> Result<u64, UserError>;
async fn count_by_email(&self, email: String) -> Result<u64, UserError>;
async fn count_by_phone(&self, phone: String) -> Result<u64, UserError>;
async fn count_by_id(&self, id: ObjectId) -> Result<u64, UserError>;
async fn find_by_email(&self, email: String) -> Result<Option<User>, UserError>;
async fn find_by_username(&self, username: String) -> Result<Option<User>, UserError>;
async fn exists_by_email(&self, email: String) -> Result<bool, UserError>;
async fn exists_by_username(&self, username: String) -> Result<bool, UserError>;
async fn get_active_users(&self) -> Result<Vec<User>, UserError>;
async fn get_users_by_role(&self, role: String) -> Result<Vec<User>, UserError>;
}


@@ -1,11 +1,9 @@
// src/routes/health/healh.rs
// src/routes/health/health.rs
use axum::{Router, routing::get};
pub fn health_routes<S>() -> Router<S>
where
S: Clone + Send + Sync + 'static,
{
// keep your existing routes/handlers here
Router::new().route("/", get(crate::handlers::health::health::health))
use crate::handlers::health::health::health;
pub fn health_routes() -> Router {
Router::new().route("/", get(health))
}


@@ -1,4 +1,5 @@
// src/routes/mod.rs
pub mod health;
pub mod shift;
pub mod user;

src/routes/shift/mod.rs (new file, 3 lines)

@@ -0,0 +1,3 @@
// src/routes/shift/mod.rs
pub mod shift;


@@ -0,0 +1,9 @@
// src/routes/shift/shift.rs
use axum::{Router, routing::post};
use crate::handlers::shift::shift::create_shift;
pub fn shift_routes() -> Router {
Router::new().route("/", post(create_shift))
}


@@ -1,16 +1,9 @@
// src/routes/user/user.rs
use axum::{
Router,
routing::{get, post},
};
use axum::{Router, routing::get};
pub fn user_routes<S>() -> Router<S>
where
S: Clone + Send + Sync + 'static,
{
// keep your existing routes/handlers here
Router::new()
.route("/", get(crate::handlers::user::user::user))
.route("/register", post(crate::handlers::user::register::register))
use crate::handlers::user::user::user;
pub fn user_routes() -> Router {
Router::new().route("/", get(user))
}


@@ -1,83 +0,0 @@
use sha2::{Digest, Sha256};
use rand::Rng;
use regex::Regex;
pub struct PasswordUtils;
impl PasswordUtils {
pub fn hash_password(password: &str) -> String {
let mut hasher = Sha256::new();
hasher.update(password.as_bytes());
format!("{:x}", hasher.finalize())
}
pub fn compare_password(password: &str, hash: &str) -> bool {
Self::hash_password(password) == *hash
}
pub fn generate_salt() -> String {
let salt: [u8; 16] = rand::thread_rng().gen();
hex::encode(salt)
}
pub fn hash_password_with_salt(password: &str, salt: &str) -> String {
let mut hasher = Sha256::new();
hasher.update((password.to_owned() + salt).as_bytes());
format!("{:x}", hasher.finalize())
}
pub fn compare_password_with_salt(password: &str, hash: &str, salt: &str) -> bool {
Self::hash_password_with_salt(password, salt) == *hash
}
pub fn generate_password_reset_token() -> String {
let token: [u8; 32] = rand::thread_rng().gen();
hex::encode(token)
}
// This method in the JS was incorrect (verify_password_reset_token was comparing a hash to itself)
// A proper verification would involve hashing the provided token and comparing it to a stored hash.
// For now, I'll just return true, implying a successful generation and that the token is "valid" on its own.
// In a real application, you'd store the hashed token in the database and compare it during verification.
pub fn verify_password_reset_token(_token: &str) -> bool {
// In a real application, you would hash the token provided and compare it to a stored hash.
// For demonstration, we'll just return true.
true
}
pub fn hash_password_with_salt_and_pepper(password: &str, salt: &str, pepper: &str) -> String {
let mut hasher = Sha256::new();
hasher.update((password.to_owned() + salt + pepper).as_bytes());
format!("{:x}", hasher.finalize())
}
pub fn compare_password_with_salt_and_pepper(password: &str, hash: &str, salt: &str, pepper: &str) -> bool {
Self::hash_password_with_salt_and_pepper(password, salt, pepper) == *hash
}
pub fn check_password_strength(password: &str) -> bool {
let min_length = 8;
let has_upper_case = Regex::new(r"[A-Z]").unwrap();
let has_lower_case = Regex::new(r"[a-z]").unwrap();
let has_numbers = Regex::new(r"\d").unwrap();
let has_special_chars = Regex::new(r"[!@#$%^&*]").unwrap();
password.len() >= min_length
&& has_upper_case.is_match(password)
&& has_lower_case.is_match(password)
&& has_numbers.is_match(password)
&& has_special_chars.is_match(password)
}
pub fn generate_password(length: usize) -> String {
const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()";
let mut rng = rand::thread_rng();
let password: String = (0..length)
.map(|_| {
let idx = rng.gen_range(0..CHARSET.len());
CHARSET[idx] as char
})
.collect();
password
}
}
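The comment on `verify_password_reset_token` above describes what a real check would involve; a minimal illustration of that idea (function names and the stored hash are hypothetical, reusing the SHA-256 approach this file used elsewhere):

```rust
// Sketch only: hash the token presented by the user and compare it with the
// hash that was stored when the reset token was generated.
use sha2::{Digest, Sha256};

fn hash_token(token: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(token.as_bytes());
    format!("{:x}", hasher.finalize())
}

fn verify_password_reset_token(provided_token: &str, stored_token_hash: &str) -> bool {
    // In a real application the stored hash would come from the database and
    // a constant-time comparison would be preferable.
    hash_token(provided_token) == stored_token_hash
}
```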