use std::fs;
use std::collections::HashMap;
use std::path::{PathBuf, Path};
use std::sync::{Arc, RwLock};

use chrono::{DateTime, Utc};
use futures::{FutureExt, TryStreamExt};
use uuid::Uuid;
use fs2::FileExt;
use tokio::runtime;
use tokio_stream::wrappers::ReadDirStream;
use futures::stream::StreamExt;

// Strongly-typed wrappers so page, namespace, and media UUIDs can't be mixed up.
#[derive(Hash, PartialEq, Eq, Clone)]
struct PageUuid(Uuid);
#[derive(Hash, PartialEq, Eq, Clone)]
struct NamespaceUuid(Uuid);
#[derive(Hash, PartialEq, Eq, Clone)]
struct MediaUuid(Uuid);

// A point-in-time view of all content, plus lookup maps and a render cache.
// The key/value types of the three path maps and the render cache are
// reconstructed assumptions; the primary maps are keyed by the UUID newtypes.
struct ContentSnapshot {
    pages: HashMap<PageUuid, Page>,
    namespaces: HashMap<NamespaceUuid, Namespace>,
    media: HashMap<MediaUuid, Media>,

    namespace_path: HashMap<String, NamespaceUuid>,
    page_path: HashMap<String, PageUuid>,
    media_path: HashMap<String, MediaUuid>,

    render_cache: HashMap<PageUuid, String>,
}

struct Page {
    uuid: PageUuid,
    title: String,
    namespace: NamespaceUuid,
    slug: String,
    current_version: DateTime<Utc>,
    prev_version: DateTime<Utc>,
}

struct Namespace {
    uuid: NamespaceUuid,
    path: String,
    pages: Vec<PageUuid>,
}

struct Media {
    uuid: MediaUuid,
    filename: String,
    mime_type: String,
    uploaded_by: Uuid,
    uploaded_on: Uuid,
    used_on: Vec<PageUuid>,
}

// Owns the data-directory lock and the current content snapshot.
struct ContentController {
    snapshot: RwLock<Box<Arc<ContentSnapshot>>>,
    lock: fs::File,
}

impl ContentController {
    pub fn init(data_dir: PathBuf) -> Result<Self, String> {
        // Take an exclusive advisory lock on the data directory so only one
        // instance works on it at a time.
        let lock_path = Path::join(&data_dir, ".lock");
        let lockfile = fs::OpenOptions::new()
            .read(true).write(true).create(true)
            .open(&lock_path)
            .map_err(|_| "Could not open data directory".to_string())?;

        lockfile.try_lock_exclusive()
            .map_err(|_| "Could not lock data directory".to_string())?;

        let runtime = runtime::Builder::new_multi_thread()
            .build()
            .map_err(|_| "Could not start async runtime".to_string())?;

        // Read the things
        let snapshot = runtime.block_on(Self::read_data(&data_dir))?;

        Ok(Self {
            lock: lockfile,
            snapshot: RwLock::new(Box::new(Arc::new(snapshot))),
        })
    }

    async fn read_data(data_dir: &PathBuf) -> Result<ContentSnapshot, String> {
        use tokio::fs;

        // Page UUID -> slug, filled in while walking the namespace tree.
        let page_slugs = Arc::new(tokio::sync::Mutex::new(HashMap::<PageUuid, String>::new()));

        // Namespaces are symlinks from namespaces/names/<name> to namespaces/id/<uuid>.
        let namespace_names_dir = Path::join(&data_dir, "namespaces/names");
        let namespace_ids_dir = Path::join(&data_dir, "namespaces/id");
        let namespace_future = fs::read_dir(&namespace_names_dir).await
            .map_err(|_| "Could not open namespace directory".to_string())
            .map(|dir_entries| ReadDirStream::new(dir_entries))?
            .filter_map(async |dir_entry| -> Option<Namespace> {
                // Only follow entries that are symlinks pointing into the ids directory.
                let link_path = dir_entry.as_ref().ok()?.path();
                let target_path = dir_entry.as_ref().ok()?
                    .metadata().await.ok()?
                    .is_symlink()
                    .then_some(
                        fs::read_link(link_path).await.ok()
                    )??;

                let last_segment = target_path.file_name()?;
                target_path.parent()?
                    .eq(&namespace_ids_dir).then_some(())?;

                let namespace_name = dir_entry.as_ref().ok()?.file_name().to_str()?.to_string();
                let namespace_uuid = NamespaceUuid(Uuid::try_parse(last_segment.to_str()?).ok()?);

                // Each namespace lists its pages as slug -> uuid symlinks under pages/.
                let namespace_pages = fs::read_dir(
                    Path::join(&namespace_ids_dir, last_segment).join("pages")
                ).await.ok()?;
                let namespace_page_uuids = ReadDirStream::new(namespace_pages)
                    .filter_map(async |dir_entry| -> Option<PageUuid> {
                        let page_path = dir_entry.as_ref().ok()?.path();
                        let page_uuid = dir_entry.as_ref().ok()?
                            .metadata().await.ok()?
                            .is_symlink()
                            .then_some(
                                fs::read_link(&page_path).await.ok()
                            )??;
                        let page_uuid = PageUuid(Uuid::try_parse(page_uuid.to_str()?).ok()?);
                        let page_slug = page_path.file_name()?.to_str()?.to_string();

                        page_slugs.lock().await.insert(page_uuid.clone(), page_slug);
                        Some(page_uuid)
                    }).collect::<Vec<_>>().await;

                Some(Namespace {
                    uuid: namespace_uuid,
                    path: namespace_name,
                    pages: namespace_page_uuids,
                })
            }).collect::<Vec<_>>().await;

        let pages_dir = Path::join(&data_dir, "pages/id");
        let page_future = fs::read_dir(&pages_dir).await
            .map_err(|_| "Could not open pages data directory".to_string())
            .map(|dir_entries| ReadDirStream::new(dir_entries))?
            .filter_map(async |dir_entry| -> Option<Page> {
                // Reading the page metadata itself is not implemented yet.
                todo!()
            });

        return Err("Unimplemented".to_string());
    }
}
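
// A minimal usage sketch, not part of the original listing: it shows how the
// controller above could be initialized. The `main` function and the
// "wiki-data" directory name are illustrative assumptions only. At this stage
// `read_data` still ends with Err("Unimplemented"), so the error branch is the
// expected outcome.
fn main() {
    match ContentController::init(PathBuf::from("wiki-data")) {
        Ok(_controller) => println!("content controller initialized"),
        Err(message) => eprintln!("could not initialize content controller: {message}"),
    }
}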