"),
};
- Ok(RawContentTemplate { content: blog_content })
+ Ok(RawContentTemplate {
+ page_title: Some("NWS | Blog Post".to_string()),
+ page_desc: Some("A Nick Web Services Blog Post.".to_string()),
+ content: blog_content,
+ })
}
-async fn dashboard_handler() -> Result<ContentTemplate<DashboardTemplate>, (axum::http::StatusCode, String)> {
- Ok(ContentTemplate { content: DashboardTemplate{} })
+async fn dashboard_handler(
+) -> Result<ContentTemplate<DashboardTemplate>, (axum::http::StatusCode, String)> {
+ Ok(ContentTemplate {
+ page_title: Some("NWS | Dashboard".to_string()),
+ page_desc: Some("Manage the services you have deployed on NWS.".to_string()),
+ content: DashboardTemplate {},
+ })
}
async fn index_handler(
axum::extract::State(state): axum::extract::State,
- )
- -> Result<ContentTemplate<IndexTemplate>, (axum::http::StatusCode, String)> {
+) -> Result<ContentTemplate<IndexTemplate>, (axum::http::StatusCode, String)> {
let uptimes: Vec<Uptime> = state.uptime_service.get_data();
let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
@@ -154,33 +182,34 @@ async fn index_handler(
continue;
}
- uptime_infos.push(
- UptimeInfo {
- name: uptime.name,
- uptime: uptime.uptime,
- response_time: uptime.response_time,
- status: match uptime.status {
- UptimeStatus::Up => String::from("Up"),
- UptimeStatus::Down => String::from("DOWN"),
- UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
- _ => String::from("Unknown")
- },
- url: None
- }
- );
+ uptime_infos.push(UptimeInfo {
+ name: uptime.name,
+ uptime: uptime.uptime,
+ response_time: uptime.response_time,
+ status: match uptime.status {
+ UptimeStatus::Up => String::from("Up"),
+ UptimeStatus::Down => String::from("DOWN"),
+ UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+ _ => String::from("Unknown"),
+ },
+ url: None,
+ });
}
- let index_template = IndexTemplate {
- uptime_infos,
- last_updated: lu_str
+ let index_template = IndexTemplate {
+ uptime_infos,
+ last_updated: lu_str,
};
- Ok(ContentTemplate { content: index_template })
+ Ok(ContentTemplate {
+ page_title: None,
+ page_desc: None,
+ content: index_template,
+ })
}
async fn status_handler(
axum::extract::State(state): axum::extract::State,
- )
- -> Result<ContentTemplate<StatusTemplate>, (axum::http::StatusCode, String)> {
+) -> Result<ContentTemplate<StatusTemplate>, (axum::http::StatusCode, String)> {
let uptimes: Vec<Uptime> = state.uptime_service.get_data();
let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
@@ -189,48 +218,47 @@ async fn status_handler(
let mut sv_uptime_infos: Vec<UptimeInfo> = vec![];
for uptime in uptimes {
-
match uptime.uptime_type {
UptimeType::Datacenter => {
- dc_uptime_infos.push(
- UptimeInfo {
- name: uptime.name,
- uptime: uptime.uptime,
- response_time: uptime.response_time,
- status: match uptime.status {
- UptimeStatus::Up => String::from("Up"),
- UptimeStatus::Down => String::from("DOWN"),
- UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
- _ => String::from("Unknown")
- },
- url: None
- }
- );
- },
- UptimeType::Service => {
- sv_uptime_infos.push(
- UptimeInfo {
- name: uptime.name,
- uptime: uptime.uptime,
- response_time: uptime.response_time,
- status: match uptime.status {
- UptimeStatus::Up => String::from("Up"),
- UptimeStatus::Down => String::from("DOWN"),
- UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
- _ => String::from("Unknown")
- },
- url: Some(uptime.url)
- }
- );
+ dc_uptime_infos.push(UptimeInfo {
+ name: uptime.name,
+ uptime: uptime.uptime,
+ response_time: uptime.response_time,
+ status: match uptime.status {
+ UptimeStatus::Up => String::from("Up"),
+ UptimeStatus::Down => String::from("DOWN"),
+ UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+ _ => String::from("Unknown"),
+ },
+ url: None,
+ });
}
- _ => continue
+ UptimeType::Service => {
+ sv_uptime_infos.push(UptimeInfo {
+ name: uptime.name,
+ uptime: uptime.uptime,
+ response_time: uptime.response_time,
+ status: match uptime.status {
+ UptimeStatus::Up => String::from("Up"),
+ UptimeStatus::Down => String::from("DOWN"),
+ UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
+ _ => String::from("Unknown"),
+ },
+ url: Some(uptime.url),
+ });
+ }
+ _ => continue,
}
}
- let service_template = StatusTemplate {
- dctr_uptime_infos: dc_uptime_infos,
- svc_uptime_infos: sv_uptime_infos,
- last_updated: lu_str
+ let service_template = StatusTemplate {
+ dctr_uptime_infos: dc_uptime_infos,
+ svc_uptime_infos: sv_uptime_infos,
+ last_updated: lu_str,
};
- Ok(ContentTemplate { content: service_template })
+ Ok(ContentTemplate {
+ page_title: Some("NWS | System Status".to_string()),
+ page_desc: Some("Check the health of NWS datacenters and services hosted on NWS.".to_string()),
+ content: service_template,
+ })
}
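
For context, the handler changes above assume template structs shaped roughly like the
sketch below. This is a sketch only: the real definitions, askama derives, and template
paths live outside this diff, and the concrete field types are assumptions.

    // Sketch only; not part of this patch.
    struct ContentTemplate<T> {
        page_title: Option<String>, // Some("NWS | ...") or None to fall back to the layout default
        page_desc: Option<String>,  // Some(...) or None to fall back to the layout default
        content: T,                 // e.g. IndexTemplate, StatusTemplate, DashboardTemplate
    }

    struct RawContentTemplate {
        page_title: Option<String>,
        page_desc: Option<String>,
        content: String, // pre-rendered blog markup (type assumed)
    }

The layout.html change further down reads these two Option fields with
{% if let Some(...) %} blocks, which is why every handler now fills them in explicitly.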
diff --git a/src/uptime_service.rs b/src/uptime_service.rs
index 60c3cf3..a9d264c 100644
--- a/src/uptime_service.rs
+++ b/src/uptime_service.rs
@@ -1,12 +1,12 @@
-use std::collections::HashMap;
-use tokio::time::{sleep};
-use std::sync::{Arc, Mutex};
-use std::time::{Duration};
-use chrono::{Datelike, NaiveDate};
-use std::time::{SystemTime, UNIX_EPOCH};
+use anyhow::anyhow;
use anyhow::Context;
-use anyhow::{anyhow};
+use chrono::{Datelike, NaiveDate};
use log::*;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use std::time::{SystemTime, UNIX_EPOCH};
+use tokio::time::sleep;
#[macro_use]
use dotenv::dotenv;
@@ -17,7 +17,7 @@ pub enum UptimeType {
Provider,
Service,
Datacenter,
- Unknown
+ Unknown,
}
#[derive(Debug, PartialEq, Clone)]
@@ -25,7 +25,7 @@ pub enum UptimeStatus {
Up,
Down,
Maintenance,
- Unknown
+ Unknown,
}
#[derive(Debug, Clone)]
@@ -34,28 +34,29 @@ pub struct Uptime {
pub uptime: String,
pub response_time: String,
pub status: UptimeStatus,
- pub uptime_type: UptimeType,
- pub url: String
+ pub uptime_type: UptimeType,
+ pub url: String,
}
#[derive(Debug, Clone)]
pub struct UptimeServiceState {
uptimes: Vec<Uptime>,
- last_updated: SystemTime
+ last_updated: SystemTime,
}
#[derive(Debug, Clone)]
pub struct UptimeService {
- state: Arc<Mutex<UptimeServiceState>>
+ state: Arc<Mutex<UptimeServiceState>>,
}
impl UptimeService {
const UPDATE_SECONDS: u64 = 300;
pub fn new() -> Self {
- let init_state = Arc::new(Mutex::new(
- UptimeServiceState { uptimes: vec![], last_updated: UNIX_EPOCH }
- ));
+ let init_state = Arc::new(Mutex::new(UptimeServiceState {
+ uptimes: vec![],
+ last_updated: UNIX_EPOCH,
+ }));
Self { state: init_state }
}
@@ -63,18 +64,18 @@ impl UptimeService {
info!("Starting UptimeService");
let cloned_state = Arc::clone(&self.state);
tokio::spawn(async move {
- loop {
- let clonedx_state = Arc::clone(&cloned_state);
- let res = Self::update_data(clonedx_state).await;
- match res {
- Err(err) => {
- error!("{}", err);
- },
- _ => {}
+ loop {
+ let clonedx_state = Arc::clone(&cloned_state);
+ let res = Self::update_data(clonedx_state).await;
+ match res {
+ Err(err) => {
+ error!("{}", err);
}
- sleep(tokio::time::Duration::from_secs(Self::UPDATE_SECONDS)).await;
+ _ => {}
}
- });
+ sleep(tokio::time::Duration::from_secs(Self::UPDATE_SECONDS)).await;
+ }
+ });
}
pub fn get_data(&self) -> Vec<Uptime> {
@@ -90,7 +91,6 @@ impl UptimeService {
}
async fn update_data(arc_state: Arc<Mutex<UptimeServiceState>>) -> ::anyhow::Result<()> {
-
debug!("Starting data update for UptimeService");
let mut request_vars = HashMap::new();
@@ -102,9 +102,10 @@ impl UptimeService {
let current_year = chrono::Utc::today().year();
let january_1st = NaiveDate::from_ymd(current_year, 1, 1).and_hms(0, 0, 0);
- let duration = january_1st.signed_duration_since(NaiveDate::from_ymd(1970, 1, 1).and_hms(0, 0, 0));
+ let duration =
+ january_1st.signed_duration_since(NaiveDate::from_ymd(1970, 1, 1).and_hms(0, 0, 0));
let year_start = UNIX_EPOCH + Duration::from_secs(duration.num_seconds() as u64);
-
+
//let ranges = &format!(
// "{}_{}-{}_{}",
// thirty_days_ago.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
@@ -122,107 +123,124 @@ impl UptimeService {
request_vars.insert("response_times", "1");
let client = reqwest::Client::new();
- let res = client.post("https://api.uptimerobot.com/v2/getMonitors")
+ let res = client
+ .post("https://api.uptimerobot.com/v2/getMonitors")
.form(&request_vars)
.send()
.await?;
- let resp = res.json::<serde_json::Value>()
- .await?;
+ let resp = res.json::<serde_json::Value>().await?;
- let monitors = resp.get("monitors")
- .context("Response did not have a monitors subobject")?
- .as_array()
- .context("Monitors subobject was not an array")?;
+ let monitors = resp
+ .get("monitors")
+ .context("Response did not have a monitors subobject")?
+ .as_array()
+ .context("Monitors subobject was not an array")?;
-
- let mut state = match arc_state.lock(){
+ let mut state = match arc_state.lock() {
Ok(val) => val,
- Err(_) => {return Err(anyhow!("Could not lock shared state"));}
+ Err(_) => {
+ return Err(anyhow!("Could not lock shared state"));
+ }
};
state.uptimes.clear();
for monitor in monitors {
- let monitor_fqn = monitor.get("friendly_name")
- .context("Monitor did not have property 'friendly_name'")?;
-
+ let monitor_fqn = monitor
+ .get("friendly_name")
+ .context("Monitor did not have property 'friendly_name'")?;
+
debug!("Monitor '{}' processing", monitor_fqn);
- let split_str: Vec<&str> = monitor_fqn.as_str()
- .context("Expected 'friendly_name' to be a string")?
- .split(".").collect();
+ let split_str: Vec<&str> = monitor_fqn
+ .as_str()
+ .context("Expected 'friendly_name' to be a string")?
+ .split(".")
+ .collect();
if split_str.len() != 2 {
debug!("Monitor '{}' excluded due to bad format", monitor_fqn);
continue;
}
- let monitor_nt = String::from(*split_str.get(0).context("Expected name to have first part")?);
- let monitor_name = String::from(*split_str.get(1).context("Expected name to have second part")?);
+ let monitor_nt = String::from(
+ *split_str
+ .get(0)
+ .context("Expected name to have first part")?,
+ );
+ let monitor_name = String::from(
+ *split_str
+ .get(1)
+ .context("Expected name to have second part")?,
+ );
let monitor_type = match monitor_nt.as_str() {
"datacenter" => UptimeType::Datacenter,
"service" => UptimeType::Service,
"competitor" => UptimeType::Provider,
- _ => UptimeType::Unknown
+ _ => UptimeType::Unknown,
};
-
if monitor_type == UptimeType::Unknown {
debug!("Monitor '{}' excluded due to unknown type", monitor_fqn);
continue;
}
- let monitor_status_num = monitor.get("status")
- .context("Expected monitor to have 'status' property")?
- .as_u64()
- .context("Expected 'status' property to be u64")?;
+ let monitor_status_num = monitor
+ .get("status")
+ .context("Expected monitor to have 'status' property")?
+ .as_u64()
+ .context("Expected 'status' property to be u64")?;
let monitor_status = match monitor_status_num {
0 => UptimeStatus::Maintenance,
1 | 8 | 9 => UptimeStatus::Down,
2 => UptimeStatus::Up,
- _ => UptimeStatus::Unknown
+ _ => UptimeStatus::Unknown,
};
if monitor_status == UptimeStatus::Unknown {
- debug!("Monitor '{}' excluded due to unknown status (status was {})", monitor_fqn, monitor_status_num);
+ debug!(
+ "Monitor '{}' excluded due to unknown status (status was {})",
+ monitor_fqn, monitor_status_num
+ );
continue;
}
- let monitor_rt_val = monitor.get("average_response_time")
- .context("Expected monitor to have property 'average_response_time'")?;
-
+ let monitor_rt_val = monitor
+ .get("average_response_time")
+ .context("Expected monitor to have property 'average_response_time'")?;
// Because UptimeRobot has the world's worst API ever
- // and decided that it's okay to return multiple datatypes
+ // and decided that it's okay to return multiple datatypes
// for one property based on how they're feeling
let monitor_rt = match monitor_rt_val.as_str() {
Some(string) => format!("{}ms", string),
- _ => format!("N/A")
+ _ => format!("N/A"),
};
- let monitor_uptime = format!("{}%",
- monitor.get("custom_uptime_ranges")
- .context("Expected monitor to have property 'custom_uptime_ranges'")?
- .as_str()
- .context("Expected 'custom_uptime_ranges' to be String")?
- );
+ let monitor_uptime = format!(
+ "{}%",
+ monitor
+ .get("custom_uptime_ranges")
+ .context("Expected monitor to have property 'custom_uptime_ranges'")?
+ .as_str()
+ .context("Expected 'custom_uptime_ranges' to be String")?
+ );
- let monitor_url = String::from(monitor.get("url")
- .context("Expected monitor to have property 'url'")?
- .as_str()
- .context("Expected 'url' to be String")?);
+ let monitor_url = String::from(
+ monitor
+ .get("url")
+ .context("Expected monitor to have property 'url'")?
+ .as_str()
+ .context("Expected 'url' to be String")?,
+ );
-;
-
- state.uptimes.push(
- Uptime {
- name: monitor_name,
- uptime: monitor_uptime,
- response_time: monitor_rt,
- status: monitor_status,
- uptime_type: monitor_type,
- url: monitor_url
- }
- );
+ state.uptimes.push(Uptime {
+ name: monitor_name,
+ uptime: monitor_uptime,
+ response_time: monitor_rt,
+ status: monitor_status,
+ uptime_type: monitor_type,
+ url: monitor_url,
+ });
}
state.last_updated = SystemTime::now();
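
The average_response_time handling above falls back to "N/A" whenever UptimeRobot returns
the value as a JSON number instead of a string. If the numeric case ever needs to be
surfaced, one option is to match on the value variants directly. A minimal sketch,
assuming the monitor JSON is handled as a serde_json::Value:

    use serde_json::Value;

    // Sketch only: format a response time that may arrive as a JSON string or a JSON number.
    fn format_response_time(val: &Value) -> String {
        match val {
            Value::String(s) => format!("{}ms", s),
            Value::Number(n) => format!("{}ms", n),
            _ => String::from("N/A"),
        }
    }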
diff --git a/templates/blogs/11-08-2023-postmortem.html b/templates/blogs/11-08-2023-postmortem.html
index 9028af8..5485440 100644
--- a/templates/blogs/11-08-2023-postmortem.html
+++ b/templates/blogs/11-08-2023-postmortem.html
@@ -1,27 +1,27 @@
-
SMC Incident Postmortem 11/08/2023
+
NWS Incident Postmortem 11/08/2023
- On November 8th, 2023 at approximately 09:47 UTC, SMC suffered
+ On November 8th, 2023 at approximately 09:47 UTC, NWS suffered
a complete outage. This outage resulted in the downtime of all
- services hosted on SMC and the downtime of the SMC Management
- Engine and the SMC dashboard.
+ services hosted on NWS and the downtime of the NWS Management
+ Engine and the NWS dashboard.
The incident lasted 38 minutes after which it was automatically
- resolved and all services were restored. This is SMC' first
+ resolved and all services were restored. This is NWS' first
outage event of 2023.
Cause
- SMC utilizes several tactics to ensure uptime. A component of
+ NWS utilizes several tactics to ensure uptime. A component of
this is load balancing and failover. This service is currently
provided by Cloudflare at the DNS level. Cloudflare sends
- health check requests to SMC servers at specified intervals. If
+ health check requests to NWS servers at specified intervals. If
it detects that one of the servers is down, it will remove the
A record from entry.nws.nickorlow.com for that server (this domain
- is where all services on SMC direct their traffic via a
+ is where all services on NWS direct their traffic via a
CNAME).
@@ -31,34 +31,34 @@
error, but rather an HTTP timeout. This is an indication that the
server may have lost network connectivity. When Cloudflare detected that the
servers were down, it removed their A records from the
- entry.nws.nickorlow.com domain. Since SMC Pennsylvania servers
+ entry.nws.nickorlow.com domain. Since NWS Pennsylvania servers
have been undergoing maintenance since August 2023, this left no
servers able to serve requests routed to entry.nws.nickorlow.com,
resulting in the outage.
- SMC utilizes UptimeRobot for monitoring the uptime statistics of
- services on SMC and SMC servers. This is the source of the
- statistics shown on the SMC status page.
+ NWS utilizes UptimeRobot for monitoring the uptime statistics of
+ services on NWS and NWS servers. This is the source of the
+ statistics shown on the NWS status page.
- UptimeRobot did not detect either of the Texas SMC servers as being
+ UptimeRobot did not detect either of the Texas NWS servers as being
offline for the duration of the outage. This is odd, as UptimeRobot
- and Cloudflare did not agree on the status of SMC servers. Logs
- on SMC servers showed that requests from UptimeRobot were being
+ and Cloudflare did not agree on the status of NWS servers. Logs
+ on NWS servers showed that requests from UptimeRobot were being
served while no requests from Cloudflare were shown in the logs.
No firewall rules existed that could have blocked the healthcheck traffic from Cloudflare
- for either of the SMC servers. There was no other configuration
+ for either of the NWS servers. There was no other configuration
found that would have blocked these requests. As these servers
are on different networks inside different buildings in different
parts of Texas, their networking equipment is entirely separate.
This rules out any failure of networking equipment owned
- by SMC. This leads us to believe that the issue may have been
+ by NWS. This leads us to believe that the issue may have been
caused due to an internet traffic anomaly, although we are currently
unable to confirm that this is the cause of the issue.
@@ -82,7 +82,7 @@
- SMC will also investigate other methods of failover and load
+ NWS will also investigate other methods of failover and load
balancing.
- Sharpe Mountain Compute is a hosting service based out of the Commonwealth of Pennsylvania
+ Nick Web Services is a hosting service based out of the Commonwealth of Pennsylvania
and the State of Texas.
We are committed to achieving maximum uptime with better performance and a lower
cost than any of the major cloud services.
@@ -23,6 +21,10 @@ We operate four datacenters located across three cities in two states. This infr
all year) for 2023 and 100% uptime for 2024 (YTD).
+
+ In 2024 (YTD), we have surpassed both Vercel and GitHub Pages in total uptime
+
+
Compare us to our competitors!
{% call scope::uptime_table(uptime_infos) %}
diff --git a/templates/layout.html b/templates/layout.html
index c8a2097..a75c6da 100644
--- a/templates/layout.html
+++ b/templates/layout.html
@@ -1,7 +1,19 @@
- Sharpe Mountain Compute
+
+ {% if let Some(title) = page_title %}
+ {{ title }}
+ {% else %}
+ Nick Web Services
+ {% endif %}
+
+ {% if let Some(desc) = page_desc %}
+
+ {% else %}
+
+ {% endif %}
+
@@ -27,16 +39,10 @@
+Notice: Due to leasing issues, the Philadelphia datacenter will be offline until
+at least May or August 2025, or it may be discontinued as an NWS location.
+
+
Service Status
-The status of services people host on Sharpe Mountain Compute.
+The status of services people host on Nick Web Services.
Note that the uptime and performance of services hosted on
-Sharpe Mountain Compute may be affected by factors not controlled by us such as
+Nick Web Services may be affected by factors not controlled by us such as
bad optimization or buggy software.
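
Taken together, the handlers and the UptimeService above are presumably wired up along
these lines. This is a sketch only: the AppState name, the route paths, and the start()
method name (inferred from the "Starting UptimeService" log line) are assumptions; only
the handler names and the uptime_service field come from this diff.

    #[derive(Clone)]
    struct AppState {
        uptime_service: UptimeService,
    }

    async fn run() {
        let uptime_service = UptimeService::new();
        uptime_service.start(); // spawns the background refresh loop (name assumed)

        let state = AppState { uptime_service };

        // Route paths here are assumptions; only the handler names come from the diff.
        let app = axum::Router::new()
            .route("/", axum::routing::get(index_handler))
            .route("/status", axum::routing::get(status_handler))
            .route("/dashboard", axum::routing::get(dashboard_handler))
            .with_state(state);

        // Binding and serving `app` (axum::serve or axum::Server::bind, depending on
        // the axum version in use) is omitted here.
        let _ = app;
    }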