diff --git a/Cargo.lock b/Cargo.lock
index 2417c42..2681e60 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1935,6 +1935,7 @@ dependencies = [
  "dotenv",
  "dotenv_codegen",
  "env_logger",
+ "lazy_static",
  "log",
  "reqwest",
  "rust_decimal",
diff --git a/Cargo.toml b/Cargo.toml
index 43d7ec5..c67a9a8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,3 +21,4 @@ env_logger = "0.11.3"
 log = "0.4.21"
 dotenv = "0.15.0"
 dotenv_codegen = "0.15.0"
+lazy_static = "1.4.0"
diff --git a/assets/style.css b/assets/style.css
index b159820..698e1f4 100644
--- a/assets/style.css
+++ b/assets/style.css
@@ -15,7 +15,7 @@ table, th, td {
 }
 
 body {
-  background-color: #d2e0ec;
+  background-color: #ccf2b3; /* #ffed8f; */
   margin: 10px auto;
   max-width: 750px;
   width: 95%;
diff --git a/src/main.rs b/src/main.rs
index 321b362..d6db50c 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,7 +1,8 @@
 use axum::{routing::get, Router};
-use chrono::DateTime;
 use chrono::offset::Utc;
+use chrono::DateTime;
 use log::*;
+use std::collections::HashMap;
 
 #[macro_use]
 extern crate dotenv_codegen;
@@ -9,35 +10,41 @@ extern crate dotenv;
 use dotenv::dotenv;
 use std::env;
 
+use lazy_static::lazy_static;
+
 mod uptime_service;
-use uptime_service::{UptimeService, Uptime, UptimeType, UptimeStatus};
+use uptime_service::{Uptime, UptimeService, UptimeStatus, UptimeType};
 
 #[derive(askama::Template)]
 #[template(path = "layout.html")]
-struct ContentTemplate<T: askama::Template> { 
-    content: T
+struct ContentTemplate<T: askama::Template> {
+    content: T,
+    page_title: Option<String>,
+    page_desc: Option<String>,
 }
 
 #[derive(askama::Template)]
 #[template(path = "layout.html")]
-struct RawContentTemplate { 
-    content: String
+struct RawContentTemplate {
+    content: String,
+    page_title: Option<String>,
+    page_desc: Option<String>,
 }
 
 struct UptimeInfo {
-    name: String, 
+    name: String,
     uptime: String,
    response_time: String,
     status: String,
-    url: Option<String>
+    url: Option<String>,
 }
 
 #[derive(askama::Template)]
 #[template(path = "index.html")]
 struct IndexTemplate {
     uptime_infos: Vec<UptimeInfo>,
-    last_updated: String
+    last_updated: String,
 }
 
 #[derive(askama::Template)]
@@ -45,19 +52,20 @@ struct IndexTemplate {
 struct StatusTemplate {
     dctr_uptime_infos: Vec<UptimeInfo>,
     svc_uptime_infos: Vec<UptimeInfo>,
-    last_updated: String
+    last_updated: String,
 }
 
-struct BlogInfo {
-    title: String,
-    date: String,
-    url: String
+#[derive(Clone)]
+struct BlogInfo<'a> {
+    title: &'a str,
+    date: &'a str,
+    url: &'a str,
 }
 
 #[derive(askama::Template)]
 #[template(path = "blog.html")]
-struct BlogTemplate {
-    blogs: Vec<BlogInfo>
+struct BlogTemplate<'a> {
+    blogs: Vec<BlogInfo<'a>>,
 }
 
 #[derive(askama::Template)]
@@ -66,7 +74,7 @@ struct DashboardTemplate {}
 
 #[derive(Clone)]
 struct AppState {
-    uptime_service: UptimeService
+    uptime_service: UptimeService,
 }
 
 #[tokio::main]
@@ -80,7 +88,6 @@ async fn main() {
     uptime_service.start();
 
     let state = AppState { uptime_service };
-
 
     let app = Router::new()
         .route("/", get(index_handler))
@@ -90,59 +97,80 @@ async fn main() {
         .route("/blogs/:blog_name", get(single_blog_handler))
         .nest_service("/assets", tower_http::services::ServeDir::new("assets"))
         .with_state(state);
-
-    let port_num = env::var("EXPOSE_PORT")
-        .unwrap_or("3000".to_string());
-
+    let port_num = env::var("EXPOSE_PORT").unwrap_or("3000".to_string());
 
-    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port_num)) 
+    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port_num))
         .await
         .unwrap();
-    
+
     info!("Listening on port {}", port_num);
     axum::serve(listener, app).await.unwrap();
 }
 
-async fn blog_handler() -> Result<ContentTemplate<BlogTemplate>, (axum::http::StatusCode, String)> {
-    Ok(ContentTemplate { content: BlogTemplate{ blogs: vec![
-        BlogInfo {
-            title: String::from("Goodbye, NWS"),
-            date: String::from("May 15th, 2024"),
-            url: String::from("goodbye-nws"),
+
+lazy_static! {
+    static ref blogs: HashMap<&'static str, BlogInfo<'static>> = {
+        let mut m = HashMap::new();
+        m.insert(
+            "11-08-2023-postmortem",
+            BlogInfo {
+                title: "Downtime Incident Postmortem",
+                date: "November 11th, 2023",
+                url: "11-08-2023-postmortem",
+            },
+        );
+        m.insert(
+            "ssl-on-cds",
+            BlogInfo {
+                title: "SSL on Container Deployment Service (at nickorlow.com)",
+                date: "July 12th, 2023",
+                url: "https://nickorlow.com/blogs/side-project-7-12-23.html",
+            },
+        );
+        m
+    };
+}
+
+async fn blog_handler(
+) -> Result<ContentTemplate<BlogTemplate<'static>>, (axum::http::StatusCode, String)> {
+    Ok(ContentTemplate {
+        page_title: Some("NWS | Blog".to_string()),
+        page_desc: Some("Read about the engineering behind NWS.".to_string()),
+        content: BlogTemplate {
+            blogs: blogs.values().cloned().collect::<Vec<BlogInfo>>()
         },
-        BlogInfo {
-            title: String::from("Downtime Incident Postmortem"),
-            date: String::from("November 11th, 2023"),
-            url: String::from("11-08-2023-postmortem"),
-        },
-        BlogInfo {
-            title: String::from("SSL on Container Deployment Service (at nickorlow.com)"),
-            date: String::from("July 12th, 2023"),
-            url: String::from("https://nickorlow.com/blogs/side-project-7-12-23.html"),
-        },
-    ] } })
+    })
 }
 
 async fn single_blog_handler(
-    axum::extract::Path((blog_name)): axum::extract::Path<(String)>
-    ) -> Result<RawContentTemplate, (axum::http::StatusCode, String)> {
-    let blog_content = match std::fs::read_to_string(format!("templates/blogs/{}.html", blog_name)) {
+    axum::extract::Path((blog_name)): axum::extract::Path<(String)>,
+) -> Result<RawContentTemplate, (axum::http::StatusCode, String)> {
+    let blog_content = match std::fs::read_to_string(format!("templates/blogs/{}.html", blog_name))
+    {
         Ok(ctn) => ctn,
-        _ => String::from("
-            Not Found!
-        ")
+        _ => String::from("
+            Not Found!
+        "),
     };
 
-    Ok(RawContentTemplate { content: blog_content })
+    Ok(RawContentTemplate {
+        page_title: Some("NWS | Blog Post".to_string()),
+        page_desc: Some("A Nick Web Services Blog Post.".to_string()),
+        content: blog_content,
+    })
 }
 
-async fn dashboard_handler() -> Result<ContentTemplate<DashboardTemplate>, (axum::http::StatusCode, String)> {
-    Ok(ContentTemplate { content: DashboardTemplate{} })
+async fn dashboard_handler(
+) -> Result<ContentTemplate<DashboardTemplate>, (axum::http::StatusCode, String)> {
+    Ok(ContentTemplate {
+        page_title: Some("NWS | Dashboard".to_string()),
+        page_desc: Some("Manage the services you have deployed on NWS.".to_string()),
+        content: DashboardTemplate {},
+    })
 }
 
 async fn index_handler(
     axum::extract::State(state): axum::extract::State<AppState>,
-    )
-    -> Result<ContentTemplate<IndexTemplate>, (axum::http::StatusCode, String)> {
+) -> Result<ContentTemplate<IndexTemplate>, (axum::http::StatusCode, String)> {
     let uptimes: Vec<Uptime> = state.uptime_service.get_data();
     let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
     let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
@@ -154,33 +182,34 @@ async fn index_handler(
             continue;
         }
 
-        uptime_infos.push(
-            UptimeInfo {
-                name: uptime.name,
-                uptime: uptime.uptime,
-                response_time: uptime.response_time,
-                status: match uptime.status {
-                    UptimeStatus::Up => String::from("Up"),
-                    UptimeStatus::Down => String::from("DOWN"),
-                    UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
-                    _ => String::from("Unknown")
-                },
-                url: None
-            }
-        );
+        uptime_infos.push(UptimeInfo {
+            name: uptime.name,
+            uptime: uptime.uptime,
+            response_time: uptime.response_time,
+            status: match uptime.status {
+                UptimeStatus::Up => String::from("Up"),
+                UptimeStatus::Down => String::from("DOWN"),
+                UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+                _ => String::from("Unknown"),
+            },
+            url: None,
+        });
     }
 
-    let index_template = IndexTemplate { 
-        uptime_infos,
-        last_updated: lu_str
+    let index_template = IndexTemplate {
+        uptime_infos,
+        last_updated: lu_str,
     };
 
-    Ok(ContentTemplate { content: index_template })
+    Ok(ContentTemplate {
+        page_title: None,
+        page_desc: None,
+        content: index_template,
+    })
 }
 
 async fn status_handler(
     axum::extract::State(state): axum::extract::State<AppState>,
-    )
-    -> Result<ContentTemplate<StatusTemplate>, (axum::http::StatusCode, String)> {
+) -> Result<ContentTemplate<StatusTemplate>, (axum::http::StatusCode, String)> {
     let uptimes: Vec<Uptime> = state.uptime_service.get_data();
     let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
     let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
@@ -189,48 +218,47 @@ async fn status_handler(
     let mut sv_uptime_infos: Vec<UptimeInfo> = vec![];
 
     for uptime in uptimes {
-
         match uptime.uptime_type {
             UptimeType::Datacenter => {
-                dc_uptime_infos.push(
-                    UptimeInfo {
-                        name: uptime.name,
-                        uptime: uptime.uptime,
-                        response_time: uptime.response_time,
-                        status: match uptime.status {
-                            UptimeStatus::Up => String::from("Up"),
-                            UptimeStatus::Down => String::from("DOWN"),
-                            UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
-                            _ => String::from("Unknown")
-                        },
-                        url: None
-                    }
-                );
-            },
-            UptimeType::Service => {
-                sv_uptime_infos.push(
-                    UptimeInfo {
-                        name: uptime.name,
-                        uptime: uptime.uptime,
-                        response_time: uptime.response_time,
-                        status: match uptime.status {
-                            UptimeStatus::Up => String::from("Up"),
-                            UptimeStatus::Down => String::from("DOWN"),
-                            UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
-                            _ => String::from("Unknown")
-                        },
-                        url: Some(uptime.url)
-                    }
-                );
+                dc_uptime_infos.push(UptimeInfo {
+                    name: uptime.name,
+                    uptime: uptime.uptime,
+                    response_time: uptime.response_time,
+                    status: match uptime.status {
+                        UptimeStatus::Up => String::from("Up"),
+                        UptimeStatus::Down => String::from("DOWN"),
+                        UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+                        _ => String::from("Unknown"),
+                    },
+                    url: None,
+                });
             }
-            _ => continue
+            UptimeType::Service => {
+                sv_uptime_infos.push(UptimeInfo {
+                    name: uptime.name,
+                    uptime: uptime.uptime,
+                    response_time: uptime.response_time,
+                    status: match uptime.status {
+                        UptimeStatus::Up => String::from("Up"),
+                        UptimeStatus::Down => String::from("DOWN"),
+                        UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+                        _ => String::from("Unknown"),
+                    },
+                    url: Some(uptime.url),
+                });
+            }
+            _ => continue,
         }
     }
 
-    let service_template = StatusTemplate { 
-        dctr_uptime_infos: dc_uptime_infos,
-        svc_uptime_infos: sv_uptime_infos,
-        last_updated: lu_str
+    let service_template = StatusTemplate {
+        dctr_uptime_infos: dc_uptime_infos,
+        svc_uptime_infos: sv_uptime_infos,
+        last_updated: lu_str,
     };
 
-    Ok(ContentTemplate { content: service_template })
+    Ok(ContentTemplate {
+        page_title: Some("NWS | System Status".to_string()),
+        page_desc: Some("Check the health of NWS datacenters and services hosted on NWS.".to_string()),
+        content: service_template,
+    })
 }
diff --git a/src/uptime_service.rs b/src/uptime_service.rs
index 60c3cf3..a9d264c 100644
--- a/src/uptime_service.rs
+++ b/src/uptime_service.rs
@@ -1,12 +1,12 @@
-use std::collections::HashMap;
-use tokio::time::{sleep};
-use std::sync::{Arc, Mutex};
-use std::time::{Duration};
-use chrono::{Datelike, NaiveDate};
-use std::time::{SystemTime, UNIX_EPOCH};
+use anyhow::anyhow;
 use anyhow::Context;
-use anyhow::{anyhow};
+use chrono::{Datelike, NaiveDate};
 use log::*;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use std::time::{SystemTime, UNIX_EPOCH};
+use tokio::time::sleep;
 
 #[macro_use]
 use dotenv::dotenv;
@@ -17,7 +17,7 @@ pub enum UptimeType {
     Provider,
     Service,
     Datacenter,
-    Unknown
+    Unknown,
 }
 
 #[derive(Debug, PartialEq, Clone)]
@@ -25,7 +25,7 @@ pub enum UptimeStatus {
     Up,
     Down,
     Maintenance,
-    Unknown
+    Unknown,
 }
 
 #[derive(Debug, Clone)]
@@ -34,28 +34,29 @@ pub struct Uptime {
     pub uptime: String,
     pub response_time: String,
     pub status: UptimeStatus,
-    pub uptime_type: UptimeType, 
-    pub url: String
+    pub uptime_type: UptimeType,
+    pub url: String,
 }
 
 #[derive(Debug, Clone)]
 pub struct UptimeServiceState {
     uptimes: Vec<Uptime>,
-    last_updated: SystemTime
+    last_updated: SystemTime,
 }
 
 #[derive(Debug, Clone)]
 pub struct UptimeService {
-    state: Arc<Mutex<UptimeServiceState>>
+    state: Arc<Mutex<UptimeServiceState>>,
 }
 
 impl UptimeService {
     const UPDATE_SECONDS: u64 = 300;
 
     pub fn new() -> Self {
-        let init_state = Arc::new(Mutex::new(
-            UptimeServiceState { uptimes: vec![], last_updated: UNIX_EPOCH }
-        ));
+        let init_state = Arc::new(Mutex::new(UptimeServiceState {
+            uptimes: vec![],
+            last_updated: UNIX_EPOCH,
+        }));
         Self { state: init_state }
     }
@@ -63,18 +64,18 @@ impl UptimeService {
         info!("Starting UptimeService");
         let cloned_state = Arc::clone(&self.state);
         tokio::spawn(async move {
-        loop {
-            let clonedx_state = Arc::clone(&cloned_state);
-            let res = Self::update_data(clonedx_state).await;
-            match res {
-                Err(err) => {
-                    error!("{}", err);
-                },
-                _ => {}
+            loop {
+                let clonedx_state = Arc::clone(&cloned_state);
+                let res = Self::update_data(clonedx_state).await;
+                match res {
+                    Err(err) => {
+                        error!("{}", err);
                     }
-                    sleep(tokio::time::Duration::from_secs(Self::UPDATE_SECONDS)).await;
+                    _ => {}
                 }
-            });
+                sleep(tokio::time::Duration::from_secs(Self::UPDATE_SECONDS)).await;
+            }
+        });
     }
 
     pub fn get_data(&self) -> Vec<Uptime> {
@@ -90,7 +91,6 @@ impl UptimeService {
     }
 
     async fn update_data(arc_state: Arc<Mutex<UptimeServiceState>>) -> ::anyhow::Result<()> {
-
         debug!("Starting data update for UptimeService");
 
         let mut request_vars = HashMap::new();
@@ -102,9 +102,10 @@ impl UptimeService {
         let current_year = chrono::Utc::today().year();
         let january_1st = NaiveDate::from_ymd(current_year, 1, 1).and_hms(0, 0, 0);
-        let duration = january_1st.signed_duration_since(NaiveDate::from_ymd(1970, 1, 1).and_hms(0, 0, 0));
+        let duration =
+            january_1st.signed_duration_since(NaiveDate::from_ymd(1970, 1, 1).and_hms(0, 0, 0));
         let year_start = UNIX_EPOCH + Duration::from_secs(duration.num_seconds() as u64);
-        
+
         //let ranges = &format!(
         //    "{}_{}-{}_{}",
         //    thirty_days_ago.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
@@ -122,107 +123,124 @@ impl UptimeService {
         request_vars.insert("response_times", "1");
 
         let client = reqwest::Client::new();
-        let res = client.post("https://api.uptimerobot.com/v2/getMonitors")
+        let res = client
+            .post("https://api.uptimerobot.com/v2/getMonitors")
             .form(&request_vars)
             .send()
             .await?;
 
-        let resp = res.json::<serde_json::Value>()
-            .await?;
+        let resp = res.json::<serde_json::Value>().await?;
 
-        let monitors = resp.get("monitors")
-            .context("Response did not have a monitors subobject")?
-            .as_array()
-            .context("Monitors subobject was not an array")?;
+        let monitors = resp
+            .get("monitors")
+            .context("Response did not have a monitors subobject")?
+            .as_array()
+            .context("Monitors subobject was not an array")?;
 
-
-        let mut state = match arc_state.lock(){
+        let mut state = match arc_state.lock() {
             Ok(val) => val,
-            Err(_) => {return Err(anyhow!("Could not lock shared state"));}
+            Err(_) => {
+                return Err(anyhow!("Could not lock shared state"));
+            }
         };
 
         state.uptimes.clear();
 
         for monitor in monitors {
-            let monitor_fqn = monitor.get("friendly_name")
-                .context("Monitor did not have property 'friendly_name'")?;
-            
+            let monitor_fqn = monitor
+                .get("friendly_name")
+                .context("Monitor did not have property 'friendly_name'")?;
+
             debug!("Monitor '{}' processing", monitor_fqn);
 
-            let split_str: Vec<&str> = monitor_fqn.as_str()
-                .context("Expected 'friendly_name' to be a string")?
-                .split(".").collect();
+            let split_str: Vec<&str> = monitor_fqn
+                .as_str()
+                .context("Expected 'friendly_name' to be a string")?
+                .split(".")
+                .collect();
 
             if split_str.len() != 2 {
                 debug!("Monitor '{}' excluded due to bad format", monitor_fqn);
                 continue;
             }
 
-            let monitor_nt = String::from(*split_str.get(0).context("Expected name to have first part")?);
-            let monitor_name = String::from(*split_str.get(1).context("Expected name to have second part")?);
+            let monitor_nt = String::from(
+                *split_str
+                    .get(0)
+                    .context("Expected name to have first part")?,
+            );
+            let monitor_name = String::from(
+                *split_str
+                    .get(1)
+                    .context("Expected name to have second part")?,
+            );
 
             let monitor_type = match monitor_nt.as_str() {
                 "datacenter" => UptimeType::Datacenter,
                 "service" => UptimeType::Service,
                 "competitor" => UptimeType::Provider,
-                _ => UptimeType::Unknown
+                _ => UptimeType::Unknown,
             };
 
-
             if monitor_type == UptimeType::Unknown {
                 debug!("Monitor '{}' excluded due to unknown type", monitor_fqn);
                 continue;
             }
 
-            let monitor_status_num = monitor.get("status")
-                .context("Expected monitor to have 'status' property")?
-                .as_u64()
-                .context("Expected 'status' property to be u64")?;
+            let monitor_status_num = monitor
+                .get("status")
+                .context("Expected monitor to have 'status' property")?
+                .as_u64()
+                .context("Expected 'status' property to be u64")?;
 
             let monitor_status = match monitor_status_num {
                 0 => UptimeStatus::Maintenance,
                 1 | 8 | 9 => UptimeStatus::Down,
                 2 => UptimeStatus::Up,
-                _ => UptimeStatus::Unknown
+                _ => UptimeStatus::Unknown,
             };
 
             if monitor_status == UptimeStatus::Unknown {
-                debug!("Monitor '{}' excluded due to unknown status (status was {})", monitor_fqn, monitor_status_num);
+                debug!(
+                    "Monitor '{}' excluded due to unknown status (status was {})",
+                    monitor_fqn, monitor_status_num
+                );
                 continue;
             }
 
-            let monitor_rt_val = monitor.get("average_response_time")
-                .context("Expected monitor to have property 'average_response_time'")?;
-            
+            let monitor_rt_val = monitor
+                .get("average_response_time")
+                .context("Expected monitor to have property 'average_response_time'")?;
 
             // Because UptimeRobot has the world's worst API ever
-            // and decided that it's okay to return multiple datatypes 
+            // and decided that it's okay to return multiple datatypes
             // for one property based on how they're feeling
             let monitor_rt = match monitor_rt_val.as_str() {
                 Some(string) => format!("{}ms", string),
-                _ => format!("N/A")
+                _ => format!("N/A"),
             };
 
-            let monitor_uptime = format!("{}%",
-                monitor.get("custom_uptime_ranges")
-                    .context("Expected monitor to have property 'custom_uptime_ranges'")?
-                    .as_str()
-                    .context("Expected 'custom_uptime_ranges' to be String")?
-                );
+            let monitor_uptime = format!(
+                "{}%",
+                monitor
+                    .get("custom_uptime_ranges")
+                    .context("Expected monitor to have property 'custom_uptime_ranges'")?
+                    .as_str()
+                    .context("Expected 'custom_uptime_ranges' to be String")?
+            );
 
-            let monitor_url = String::from(monitor.get("url")
-                .context("Expected monitor to have property 'url'")?
-                .as_str()
-                .context("Expected 'url' to be String")?);
+            let monitor_url = String::from(
+                monitor
+                    .get("url")
+                    .context("Expected monitor to have property 'url'")?
+                    .as_str()
+                    .context("Expected 'url' to be String")?,
+            );
 
-;
-
-            state.uptimes.push(
-                Uptime {
-                    name: monitor_name,
-                    uptime: monitor_uptime,
-                    response_time: monitor_rt,
-                    status: monitor_status,
-                    uptime_type: monitor_type,
-                    url: monitor_url
-                }
-            );
+            state.uptimes.push(Uptime {
+                name: monitor_name,
+                uptime: monitor_uptime,
+                response_time: monitor_rt,
+                status: monitor_status,
+                uptime_type: monitor_type,
+                url: monitor_url,
+            });
         }
 
         state.last_updated = SystemTime::now();
diff --git a/templates/blogs/11-08-2023-postmortem.html b/templates/blogs/11-08-2023-postmortem.html
index 9028af8..5485440 100644
--- a/templates/blogs/11-08-2023-postmortem.html
+++ b/templates/blogs/11-08-2023-postmortem.html
@@ -1,27 +1,27 @@
-SMC Incident Postmortem 11/08/2023
+NWS Incident Postmortem 11/08/2023
 
-    On November 8th, 2023 at approximately 09:47 UTC, SMC suffered
+    On November 8th, 2023 at approximately 09:47 UTC, NWS suffered
     a complete outage. This outage resulted in the downtime of all
-    services hosted on SMC and the downtime of the SMC Management
-    Engine and the SMC dashboard.
+    services hosted on NWS and the downtime of the NWS Management
+    Engine and the NWS dashboard.
 
     The incident lasted 38 minutes after which it was automatically
-    resolved and all services were restored. This is SMC' first
+    resolved and all services were restored. This is NWS' first
     outage event of 2023.
 
 Cause
 
-    SMC utilizes several tactics to ensure uptime. A component of
+    NWS utilizes several tactics to ensure uptime. A component of
     this is load balancing and failover. This service is currently
     provided by Cloudflare at the DNS level. Cloudflare sends
-    health check requests to SMC servers at specified intervals. If
+    health check requests to NWS servers at specified intervals. If
     it detects that one of the servers is down, it will remove the A
     record from entry.nws.nickorlow.com for that server (this domain
-    is where all services on SMC direct their traffic via a
+    is where all services on NWS direct their traffic via a
     CNAME).
 
@@ -31,34 +31,34 @@ error, but rather an HTTP timeout. This is an indication that the
     server may have lost network connectivity. When Cloudflare detected
     that the servers were down, it removed their A records from the
-    entry.nws.nickorlow.com domain. Since SMC Pennsylvania servers
+    entry.nws.nickorlow.com domain. Since NWS Pennsylvania servers
     have been undergoing maintenance since August 2023, this left no
     servers able to serve requests routed to entry.nws.nickorlow.com,
     resulting in the outage.
 
-    SMC utilizes UptimeRobot for monitoring the uptime statistics of
-    services on SMC and SMC servers. This is the source of the
-    statistics shown on the SMC status page.
+    NWS utilizes UptimeRobot for monitoring the uptime statistics of
+    services on NWS and NWS servers. This is the source of the
+    statistics shown on the NWS status page.
 
-    UptimeRobot did not detect either of the Texas SMC servers as being
+    UptimeRobot did not detect either of the Texas NWS servers as being
     offline for the duration of the outage. This is odd, as UptimeRobot
-    and Cloudflare did not agree on the status of SMC servers. Logs
-    on SMC servers showed that requests from UptimeRobot were being
+    and Cloudflare did not agree on the status of NWS servers. Logs
+    on NWS servers showed that requests from UptimeRobot were being
     served while no requests from Cloudflare were shown in the logs.
 
     No firewall rules existed that could have blocked the healthcheck traffic from Cloudflare
-    for either of the SMC servers. There was no other configuration
+    for either of the NWS servers. There was no other configuration
     found that would have blocked these requests. As these servers
     are on different networks inside different buildings in different
     parts of Texas, their networking equipment is entirely separate.
     This rules out any failure of networking equipment owned
-    by SMC. This leads us to believe that the issue may have been
+    by NWS. This leads us to believe that the issue may have been
     caused due to an internet traffic anomaly, although we are
     currently unable to confirm that this is the cause of the issue.
 
@@ -82,7 +82,7 @@
 
-    SMC will also investigate other methods of failover and load
+    NWS will also investigate other methods of failover and load
     balancing.
 
diff --git a/templates/blogs/goodbye-nws.html b/templates/blogs/goodbye-nws.html
index ef2c4c1..2924c69 100644
--- a/templates/blogs/goodbye-nws.html
+++ b/templates/blogs/goodbye-nws.html
@@ -2,7 +2,7 @@
 
-Nick Web Services (NWS) is now Sharpe Mountain Compute (SMC).
+Nick Web Services (NWS) is now Nick Web Services (NWS).
 
diff --git a/templates/dashboard.html b/templates/dashboard.html
index 705ee2f..2f51d5c 100644
--- a/templates/dashboard.html
+++ b/templates/dashboard.html
@@ -1,2 +1,2 @@
 Under Construction
-The dashboard isn't ready yet! Use the old website for now!
+The new dashboard isn't ready yet! Nobody but me used it anyways!
diff --git a/templates/index.html b/templates/index.html
index be10b16..614f0c5 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -1,14 +1,12 @@
 {%- import "uptime_table.html" as scope -%}
 
-Nick Web Services is now
-Sharpe Mountain Compute
+Nick Web Services
 
 Pottsville, PA - Philadelphia, PA - Austin, TX
 
-[ Old Website (NWS Branded) ]
 
-    Sharpe Mountain Compute is a hosting service based out of the Commonwealth of Pennsylvania
+    Nick Web Services is a hosting service based out of the Commonwealth of Pennsylvania
     and the State of Texas. We are committed to achieving maximum uptime with better
     performance and a lower cost than any of the major cloud services.
@@ -23,6 +21,10 @@ We operate four datacenters located across three cities in two states. This infr
     all year) for 2023 and 100% uptime for 2024 (YTD).
 
+
+    In 2024, YTD we have surpassed both Vercel and Github Pages in total uptime
+
+
 Compare us to our competitors!
 
 {% call scope::uptime_table(uptime_infos) %}
diff --git a/templates/layout.html b/templates/layout.html
index c8a2097..a75c6da 100644
--- a/templates/layout.html
+++ b/templates/layout.html
@@ -1,7 +1,19 @@
-    Sharpe Mountain Compute
+
+    {% if let Some(title) = page_title %}
+        {{ title }}
+    {% else %}
+        Nick Web Services
+    {% endif %}
+
+    {% if let Some(desc) = page_desc %}
+
+    {% else %}
+
+    {% endif %}
+
@@ -27,16 +39,10 @@
-Sharpe Mountain Compute
-
-    formerly Nick Web Services (NWS)
-
+Nick Web Services
 
 Copyright © Nicholas Orlowsky 2024
 
-
-    "We're getting there" - SEPTA
-
diff --git a/templates/system_status.html b/templates/system_status.html
index 743df68..5f20e18 100644
--- a/templates/system_status.html
+++ b/templates/system_status.html
@@ -4,17 +4,22 @@
 
 Datacenter Status
 
-The status of each of Sharpe Mountain Compute's 4
+The status of each of Nick Web Services's 4
 datacenters.
 
 {% call scope::uptime_table(dctr_uptime_infos) %}
+
+Notice: Due to leasing issues, the Philadelphia datacenter will be offline until
+at least May or August 2025 or it may be discontinued as an NWS location.
+
+
 Service Status
 
-The status of services people host on Sharpe Mountain Compute.
+The status of services people host on Nick Web Services.
 Note that the uptime and performance of services hosted on
-Sharpe Mountain Compute may be affected by factors not controlled by us such as
+Nick Web Services may be affected by factors not controlled by us such as
 bad optimization or buggy software.
-The status of services people host on Sharpe Mountain Compute. +The status of services people host on Nick Web Services. Note that the uptime and performance of services hosted on -Sharpe Mountain Compute may be affected by factors not controlled by us such as +Nick Web Services may be affected by factors not controlled by us such as bad optimization or buggy software.