init commit
This commit is contained in:
commit
edc0dd01e8
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
/target
|
||||||
|
.env
|
2121
Cargo.lock
generated
Normal file
2121
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
23
Cargo.toml
Normal file
23
Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
[package]
|
||||||
|
name = "website"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
askama = { version = "0.12.1", features = ["with-axum"] }
|
||||||
|
askama_axum = "0.4.0"
|
||||||
|
axum = "0.7.4"
|
||||||
|
tower-http = { version = "0.5.1", features = ["fs", "trace"] }
|
||||||
|
tokio = { version = "1", features = ["full"] }
|
||||||
|
reqwest = { version = "0.12.4", features = ["json"] }
|
||||||
|
serde = "1.0.201"
|
||||||
|
serde_json = "1.0.117"
|
||||||
|
chrono = "0.4.38"
|
||||||
|
rust_decimal = "1.35.0"
|
||||||
|
anyhow = "1.0.83"
|
||||||
|
env_logger = "0.11.3"
|
||||||
|
log = "0.4.21"
|
||||||
|
dotenv = "0.15.0"
|
||||||
|
dotenv_codegen = "0.15.0"
|
14
Dockerfile
Normal file
14
Dockerfile
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
FROM rust:1.74.1 as build
|
||||||
|
|
||||||
|
ENV PKG_CONFIG_ALLOW_CROSS=1
|
||||||
|
|
||||||
|
WORKDIR .
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
RUN cargo install --path .
|
||||||
|
|
||||||
|
ENV RUST_LOG=info
|
||||||
|
ENV EXPOSE_PORT=80
|
||||||
|
|
||||||
|
EXPOSE 80
|
||||||
|
ENTRYPOINT ["website"]
|
BIN
assets/flag-images/us-pa.png
Normal file
BIN
assets/flag-images/us-pa.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.1 KiB |
BIN
assets/flag-images/us-tx.png
Normal file
BIN
assets/flag-images/us-tx.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 220 B |
BIN
assets/flag-images/us.png
Normal file
BIN
assets/flag-images/us.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 476 B |
40
assets/style.css
Normal file
40
assets/style.css
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
* {
|
||||||
|
font-family: serif;
|
||||||
|
}
|
||||||
|
|
||||||
|
th {
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
td {
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
table, th, td {
|
||||||
|
border: 1px solid black;
|
||||||
|
}
|
||||||
|
|
||||||
|
body {
|
||||||
|
background-color: #d2e0ec;
|
||||||
|
margin: 20px auto;
|
||||||
|
max-width: 750px;
|
||||||
|
}
|
||||||
|
|
||||||
|
a {
|
||||||
|
text-decoration: none;
|
||||||
|
color: #114488;
|
||||||
|
}
|
||||||
|
|
||||||
|
img {
|
||||||
|
max-width: 100%;
|
||||||
|
}
|
||||||
|
|
||||||
|
.flag-img {
|
||||||
|
height: 30px;
|
||||||
|
}
|
||||||
|
|
||||||
|
p.lineitem::after {
|
||||||
|
content: " ";
|
||||||
|
flex: 1;
|
||||||
|
border-bottom: 1px dotted #000;
|
||||||
|
}
|
10
makefile
Normal file
10
makefile
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
.PHONY: run
|
||||||
|
|
||||||
|
run:
|
||||||
|
RUST_LOG=debug cargo run
|
||||||
|
|
||||||
|
docker-build:
|
||||||
|
docker build . -t smc-website:dev
|
||||||
|
|
||||||
|
docker-run: docker-build
|
||||||
|
docker run -p 8085:80 smc-website:dev
|
236
src/main.rs
Normal file
236
src/main.rs
Normal file
|
@ -0,0 +1,236 @@
|
||||||
|
use axum::{routing::get, Router};
|
||||||
|
use chrono::DateTime;
|
||||||
|
use chrono::offset::Utc;
|
||||||
|
use log::*;
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
extern crate dotenv_codegen;
|
||||||
|
extern crate dotenv;
|
||||||
|
use dotenv::dotenv;
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
mod uptime_service;
|
||||||
|
|
||||||
|
use uptime_service::{UptimeService, Uptime, UptimeType, UptimeStatus};
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "layout.html")]
|
||||||
|
struct ContentTemplate <T: askama::Template> {
|
||||||
|
content: T
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "layout.html")]
|
||||||
|
struct RawContentTemplate {
|
||||||
|
content: String
|
||||||
|
}
|
||||||
|
|
||||||
|
/// One row of the uptime table as consumed by `uptime_table.html`.
/// All values are pre-formatted strings ready for display.
struct UptimeInfo {
    name: String,
    // Year-to-date uptime percentage, e.g. "99.99%".
    uptime: String,
    // Average response time, e.g. "123ms" or "N/A".
    response_time: String,
    // Human-readable status label ("Up", "DOWN", ...).
    status: String,
    // Optional link target; the row's name becomes an anchor when `Some`.
    url: Option<String>,
}
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "index.html")]
|
||||||
|
struct IndexTemplate {
|
||||||
|
uptime_infos: Vec<UptimeInfo>,
|
||||||
|
last_updated: String
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "system_status.html")]
|
||||||
|
struct StatusTemplate {
|
||||||
|
dctr_uptime_infos: Vec<UptimeInfo>,
|
||||||
|
svc_uptime_infos: Vec<UptimeInfo>,
|
||||||
|
last_updated: String
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Metadata for one entry in the blog index (`blog.html`).
struct BlogInfo {
    title: String,
    // Display date, already formatted (e.g. "May 15th, 2024").
    date: String,
    // Either a local slug (served from /blogs/{url}) or a full
    // "https://..." link — the template distinguishes the two.
    url: String,
}
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "blog.html")]
|
||||||
|
struct BlogTemplate {
|
||||||
|
blogs: Vec<BlogInfo>
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(askama::Template)]
|
||||||
|
#[template(path = "dashboard.html")]
|
||||||
|
struct DashboardTemplate {}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct AppState {
|
||||||
|
uptime_service: UptimeService
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
dotenv().ok();
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
|
info!("Starting Sharpe Mountain Compute Website");
|
||||||
|
|
||||||
|
let uptime_service: UptimeService = UptimeService::new();
|
||||||
|
uptime_service.start();
|
||||||
|
|
||||||
|
let state = AppState { uptime_service };
|
||||||
|
|
||||||
|
|
||||||
|
let app = Router::new()
|
||||||
|
.route("/", get(index_handler))
|
||||||
|
.route("/system_status", get(status_handler))
|
||||||
|
.route("/dashboard", get(dashboard_handler))
|
||||||
|
.route("/blog", get(blog_handler))
|
||||||
|
.route("/blogs/:blog_name", get(single_blog_handler))
|
||||||
|
.nest_service("/assets", tower_http::services::ServeDir::new("assets"))
|
||||||
|
.with_state(state);
|
||||||
|
|
||||||
|
|
||||||
|
let port_num = env::var("EXPOSE_PORT")
|
||||||
|
.unwrap_or("3000".to_string());
|
||||||
|
|
||||||
|
|
||||||
|
let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port_num))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
info!("Listening on port {}", port_num);
|
||||||
|
|
||||||
|
axum::serve(listener, app).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn blog_handler() -> Result<ContentTemplate<impl askama::Template>, (axum::http::StatusCode, String)> {
|
||||||
|
Ok(ContentTemplate { content: BlogTemplate{ blogs: vec![
|
||||||
|
BlogInfo {
|
||||||
|
title: String::from("Goodbye, NWS"),
|
||||||
|
date: String::from("May 15th, 2024"),
|
||||||
|
url: String::from("goodbye-nws"),
|
||||||
|
},
|
||||||
|
BlogInfo {
|
||||||
|
title: String::from("Downtime Incident Postmortem"),
|
||||||
|
date: String::from("November 11th, 2023"),
|
||||||
|
url: String::from("11-08-2023-postmortem"),
|
||||||
|
},
|
||||||
|
BlogInfo {
|
||||||
|
title: String::from("SSL on Container Deployment Service (at nickorlow.com)"),
|
||||||
|
date: String::from("July 12th, 2023"),
|
||||||
|
url: String::from("https://nickorlow.com/blogs/side-project-7-12-23.html"),
|
||||||
|
},
|
||||||
|
] } })
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn single_blog_handler(
|
||||||
|
axum::extract::Path((blog_name)): axum::extract::Path<(String)>
|
||||||
|
) -> Result<RawContentTemplate, (axum::http::StatusCode, String)> {
|
||||||
|
let blog_content = match std::fs::read_to_string(format!("templates/blogs/{}.html", blog_name)) {
|
||||||
|
Ok(ctn) => ctn,
|
||||||
|
_ => String::from("<h1>Not Found!</h1>")
|
||||||
|
};
|
||||||
|
Ok(RawContentTemplate { content: blog_content })
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn dashboard_handler() -> Result<ContentTemplate<impl askama::Template>, (axum::http::StatusCode, String)> {
|
||||||
|
Ok(ContentTemplate { content: DashboardTemplate{} })
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn index_handler(
|
||||||
|
axum::extract::State(state): axum::extract::State<AppState>,
|
||||||
|
)
|
||||||
|
-> Result<ContentTemplate<impl askama::Template>, (axum::http::StatusCode, String)> {
|
||||||
|
let uptimes: Vec<Uptime> = state.uptime_service.get_data();
|
||||||
|
let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
|
||||||
|
let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
|
||||||
|
|
||||||
|
let mut uptime_infos: Vec<UptimeInfo> = vec![];
|
||||||
|
|
||||||
|
for uptime in uptimes {
|
||||||
|
if uptime.uptime_type != UptimeType::Provider {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
uptime_infos.push(
|
||||||
|
UptimeInfo {
|
||||||
|
name: uptime.name,
|
||||||
|
uptime: uptime.uptime,
|
||||||
|
response_time: uptime.response_time,
|
||||||
|
status: match uptime.status {
|
||||||
|
UptimeStatus::Up => String::from("Up"),
|
||||||
|
UptimeStatus::Down => String::from("DOWN"),
|
||||||
|
UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
|
||||||
|
_ => String::from("Unknown")
|
||||||
|
},
|
||||||
|
url: None
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let index_template = IndexTemplate {
|
||||||
|
uptime_infos,
|
||||||
|
last_updated: lu_str
|
||||||
|
};
|
||||||
|
Ok(ContentTemplate { content: index_template })
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn status_handler(
|
||||||
|
axum::extract::State(state): axum::extract::State<AppState>,
|
||||||
|
)
|
||||||
|
-> Result<ContentTemplate<impl askama::Template>, (axum::http::StatusCode, String)> {
|
||||||
|
let uptimes: Vec<Uptime> = state.uptime_service.get_data();
|
||||||
|
let lu: DateTime<Utc> = state.uptime_service.get_last_updated().into();
|
||||||
|
let lu_str = format!("{} UTC", lu.format("%B %e, %Y %T"));
|
||||||
|
|
||||||
|
let mut dc_uptime_infos: Vec<UptimeInfo> = vec![];
|
||||||
|
let mut sv_uptime_infos: Vec<UptimeInfo> = vec![];
|
||||||
|
|
||||||
|
for uptime in uptimes {
|
||||||
|
|
||||||
|
match uptime.uptime_type {
|
||||||
|
UptimeType::Datacenter => {
|
||||||
|
dc_uptime_infos.push(
|
||||||
|
UptimeInfo {
|
||||||
|
name: uptime.name,
|
||||||
|
uptime: uptime.uptime,
|
||||||
|
response_time: uptime.response_time,
|
||||||
|
status: match uptime.status {
|
||||||
|
UptimeStatus::Up => String::from("Up"),
|
||||||
|
UptimeStatus::Down => String::from("DOWN"),
|
||||||
|
UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
|
||||||
|
_ => String::from("Unknown")
|
||||||
|
},
|
||||||
|
url: None
|
||||||
|
}
|
||||||
|
);
|
||||||
|
},
|
||||||
|
UptimeType::Service => {
|
||||||
|
sv_uptime_infos.push(
|
||||||
|
UptimeInfo {
|
||||||
|
name: uptime.name,
|
||||||
|
uptime: uptime.uptime,
|
||||||
|
response_time: uptime.response_time,
|
||||||
|
status: match uptime.status {
|
||||||
|
UptimeStatus::Up => String::from("Up"),
|
||||||
|
UptimeStatus::Down => String::from("DOWN"),
|
||||||
|
UptimeStatus::Maintenance => String::from("Undergoing Maintenance"),
|
||||||
|
_ => String::from("Unknown")
|
||||||
|
},
|
||||||
|
url: Some(uptime.url)
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
_ => continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let service_template = StatusTemplate {
|
||||||
|
dctr_uptime_infos: dc_uptime_infos,
|
||||||
|
svc_uptime_infos: sv_uptime_infos,
|
||||||
|
last_updated: lu_str
|
||||||
|
};
|
||||||
|
Ok(ContentTemplate { content: service_template })
|
||||||
|
}
|
232
src/uptime_service.rs
Normal file
232
src/uptime_service.rs
Normal file
|
@ -0,0 +1,232 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use tokio::time::{sleep};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::{Duration};
|
||||||
|
use chrono::{Datelike, NaiveDate};
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
use anyhow::Context;
|
||||||
|
use anyhow::{anyhow};
|
||||||
|
use log::*;
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
use dotenv::dotenv;
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
/// Category of a monitor, derived from the `<type>.<name>` prefix of its
/// UptimeRobot friendly name (see `update_data`).
#[derive(Debug, PartialEq, Clone)]
pub enum UptimeType {
    /// "competitor." prefix — a competing hosting provider.
    Provider,
    /// "service." prefix — a service hosted on SMC.
    Service,
    /// "datacenter." prefix — one of SMC's datacenters.
    Datacenter,
    /// Prefix did not match any known category; such monitors are skipped.
    Unknown,
}
|
||||||
|
|
||||||
|
/// Current state of a monitor, mapped from UptimeRobot's numeric
/// `status` field (see `update_data`).
#[derive(Debug, PartialEq, Clone)]
pub enum UptimeStatus {
    /// Status code 2.
    Up,
    /// Status codes 1, 8 and 9.
    Down,
    /// Status code 0.
    Maintenance,
    /// Any other status code; such monitors are skipped.
    Unknown,
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct Uptime {
|
||||||
|
pub name: String,
|
||||||
|
pub uptime: String,
|
||||||
|
pub response_time: String,
|
||||||
|
pub status: UptimeStatus,
|
||||||
|
pub uptime_type: UptimeType,
|
||||||
|
pub url: String
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct UptimeServiceState {
|
||||||
|
uptimes: Vec<Uptime>,
|
||||||
|
last_updated: SystemTime
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct UptimeService {
|
||||||
|
state: Arc<Mutex<UptimeServiceState>>
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UptimeService {
|
||||||
|
const UPDATE_SECONDS: u64 = 300;
|
||||||
|
|
||||||
|
pub fn new() -> Self {
|
||||||
|
let init_state = Arc::new(Mutex::new(
|
||||||
|
UptimeServiceState { uptimes: vec![], last_updated: UNIX_EPOCH }
|
||||||
|
));
|
||||||
|
Self { state: init_state }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn start(&self) {
|
||||||
|
info!("Starting UptimeService");
|
||||||
|
let cloned_state = Arc::clone(&self.state);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let clonedx_state = Arc::clone(&cloned_state);
|
||||||
|
let res = Self::update_data(clonedx_state).await;
|
||||||
|
match res {
|
||||||
|
Err(err) => {
|
||||||
|
error!("{}", err);
|
||||||
|
},
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
sleep(tokio::time::Duration::from_secs(Self::UPDATE_SECONDS)).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_data(&self) -> Vec<Uptime> {
|
||||||
|
let state = self.state.lock().unwrap();
|
||||||
|
let uptimes = state.uptimes.clone();
|
||||||
|
return uptimes;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_last_updated(&self) -> SystemTime {
|
||||||
|
let state = self.state.lock().unwrap();
|
||||||
|
let lu = state.last_updated.clone();
|
||||||
|
return lu;
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn update_data(arc_state: Arc<Mutex<UptimeServiceState>>) -> ::anyhow::Result<()> {
|
||||||
|
|
||||||
|
debug!("Starting data update for UptimeService");
|
||||||
|
|
||||||
|
let mut request_vars = HashMap::new();
|
||||||
|
let api_key = env::var("UPTIMEROBOT_API_KEY")?;
|
||||||
|
request_vars.insert("api_key", api_key.as_str());
|
||||||
|
request_vars.insert("all_time_uptime_ratio", "1");
|
||||||
|
let now = SystemTime::now();
|
||||||
|
//let thirty_days_ago = now - Duration::from_secs(30 * 24 * 3600);
|
||||||
|
|
||||||
|
let current_year = chrono::Utc::today().year();
|
||||||
|
let january_1st = NaiveDate::from_ymd(current_year, 1, 1).and_hms(0, 0, 0);
|
||||||
|
let duration = january_1st.signed_duration_since(NaiveDate::from_ymd(1970, 1, 1).and_hms(0, 0, 0));
|
||||||
|
let year_start = UNIX_EPOCH + Duration::from_secs(duration.num_seconds() as u64);
|
||||||
|
|
||||||
|
//let ranges = &format!(
|
||||||
|
// "{}_{}-{}_{}",
|
||||||
|
// thirty_days_ago.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
// now.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
// year_start.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
// now.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
//);
|
||||||
|
let ranges = &format!(
|
||||||
|
"{}_{}",
|
||||||
|
year_start.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
now.duration_since(SystemTime::UNIX_EPOCH)?.as_secs(),
|
||||||
|
);
|
||||||
|
request_vars.insert("custom_uptime_ranges", ranges);
|
||||||
|
request_vars.insert("response_times_average", "1440");
|
||||||
|
request_vars.insert("response_times", "1");
|
||||||
|
|
||||||
|
let client = reqwest::Client::new();
|
||||||
|
let res = client.post("https://api.uptimerobot.com/v2/getMonitors")
|
||||||
|
.form(&request_vars)
|
||||||
|
.send()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let resp = res.json::<serde_json::Value>()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let monitors = resp.get("monitors")
|
||||||
|
.context("Response did not have a monitors subobject")?
|
||||||
|
.as_array()
|
||||||
|
.context("Monitors subobject was not an array")?;
|
||||||
|
|
||||||
|
|
||||||
|
let mut state = match arc_state.lock(){
|
||||||
|
Ok(val) => val,
|
||||||
|
Err(_) => {return Err(anyhow!("Could not lock shared state"));}
|
||||||
|
};
|
||||||
|
state.uptimes.clear();
|
||||||
|
for monitor in monitors {
|
||||||
|
let monitor_fqn = monitor.get("friendly_name")
|
||||||
|
.context("Monitor did not have property 'friendly_name'")?;
|
||||||
|
|
||||||
|
debug!("Monitor '{}' processing", monitor_fqn);
|
||||||
|
|
||||||
|
let split_str: Vec<&str> = monitor_fqn.as_str()
|
||||||
|
.context("Expected 'friendly_name' to be a string")?
|
||||||
|
.split(".").collect();
|
||||||
|
if split_str.len() != 2 {
|
||||||
|
debug!("Monitor '{}' excluded due to bad format", monitor_fqn);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let monitor_nt = String::from(*split_str.get(0).context("Expected name to have first part")?);
|
||||||
|
let monitor_name = String::from(*split_str.get(1).context("Expected name to have second part")?);
|
||||||
|
let monitor_type = match monitor_nt.as_str() {
|
||||||
|
"datacenter" => UptimeType::Datacenter,
|
||||||
|
"service" => UptimeType::Service,
|
||||||
|
"competitor" => UptimeType::Provider,
|
||||||
|
_ => UptimeType::Unknown
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
if monitor_type == UptimeType::Unknown {
|
||||||
|
debug!("Monitor '{}' excluded due to unknown type", monitor_fqn);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let monitor_status_num = monitor.get("status")
|
||||||
|
.context("Expected monitor to have 'status' property")?
|
||||||
|
.as_u64()
|
||||||
|
.context("Expected 'status' property to be u64")?;
|
||||||
|
|
||||||
|
let monitor_status = match monitor_status_num {
|
||||||
|
0 => UptimeStatus::Maintenance,
|
||||||
|
1 | 8 | 9 => UptimeStatus::Down,
|
||||||
|
2 => UptimeStatus::Up,
|
||||||
|
_ => UptimeStatus::Unknown
|
||||||
|
};
|
||||||
|
|
||||||
|
if monitor_status == UptimeStatus::Unknown {
|
||||||
|
debug!("Monitor '{}' excluded due to unknown status (status was {})", monitor_fqn, monitor_status_num);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let monitor_rt_val = monitor.get("average_response_time")
|
||||||
|
.context("Expected monitor to have property 'average_response_time'")?;
|
||||||
|
|
||||||
|
|
||||||
|
// Because UptimeRobot has the world's worst API ever
|
||||||
|
// and decided that it's okay to return multiple datatypes
|
||||||
|
// for one property based on how they're feeling
|
||||||
|
let monitor_rt = match monitor_rt_val.as_str() {
|
||||||
|
Some(string) => format!("{}ms", string),
|
||||||
|
_ => format!("N/A")
|
||||||
|
};
|
||||||
|
|
||||||
|
let monitor_uptime = format!("{}%",
|
||||||
|
monitor.get("custom_uptime_ranges")
|
||||||
|
.context("Expected monitor to have property 'custom_uptime_ranges'")?
|
||||||
|
.as_str()
|
||||||
|
.context("Expected 'custom_uptime_ranges' to be String")?
|
||||||
|
);
|
||||||
|
|
||||||
|
let monitor_url = String::from(monitor.get("url")
|
||||||
|
.context("Expected monitor to have property 'url'")?
|
||||||
|
.as_str()
|
||||||
|
.context("Expected 'url' to be String")?);
|
||||||
|
|
||||||
|
;
|
||||||
|
|
||||||
|
state.uptimes.push(
|
||||||
|
Uptime {
|
||||||
|
name: monitor_name,
|
||||||
|
uptime: monitor_uptime,
|
||||||
|
response_time: monitor_rt,
|
||||||
|
status: monitor_status,
|
||||||
|
uptime_type: monitor_type,
|
||||||
|
url: monitor_url
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
state.last_updated = SystemTime::now();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
18
templates/blog.html
Normal file
18
templates/blog.html
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
<h1>Blog</h1>
|
||||||
|
|
||||||
|
{% for blog in blogs %}
|
||||||
|
<div style="display:flex;justify-content:space-around; padding-bottom: 5px;">
|
||||||
|
<p style="margin-bottom: 0px; padding-right: 8px;">
|
||||||
|
<a
|
||||||
|
{% if blog.url.contains("https://") %}
|
||||||
|
href="{{ blog.url }}"
|
||||||
|
{% else %}
|
||||||
|
href="/blogs/{{ blog.url }}"
|
||||||
|
{% endif %}
|
||||||
|
>
|
||||||
|
[ {{ blog.title }} ]
|
||||||
|
</a></p>
|
||||||
|
<div style="flex-grow: 1; border-bottom: 1px dotted black;"></div>
|
||||||
|
<p style="margin-bottom: 0px; padding-left: 8px;"><i>{{ blog.date }}</i></p>
|
||||||
|
</div>
|
||||||
|
{% endfor %}
|
89
templates/blogs/11-08-2023-postmortem.html
Normal file
89
templates/blogs/11-08-2023-postmortem.html
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
<h1>SMC Incident Postmortem 11/08/2023</h1>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
On November 8th, 2023 at approximately 09:47 UTC, SMC suffered
|
||||||
|
a complete outage. This outage resulted in the downtime of all
|
||||||
|
services hosted on SMC and the downtime of the SMC Management
|
||||||
|
Engine and the SMC dashboard.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
The incident lasted 38 minutes after which it was automatically
|
||||||
|
resolved and all services were restored. This is SMC's first
|
||||||
|
outage event of 2023.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<h2>Cause</h2>
|
||||||
|
<p>
|
||||||
|
SMC utilizes several tactics to ensure uptime. A component of
|
||||||
|
this is load balancing and failover. This service is currently
|
||||||
|
provided by Cloudflare at the DNS level. Cloudflare sends
|
||||||
|
health check requests to SMC servers at specified intervals. If
|
||||||
|
it detects that one of the servers is down, it will remove the
|
||||||
|
A record from entry.nws.nickorlow.com for that server (this domain
|
||||||
|
is where all services on SMC direct their traffic via a
|
||||||
|
CNAME).
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
At around 09:47 UTC, Cloudflare detected that our servers in
|
||||||
|
Texas (Austin and Hill Country) were down. It did not detect an
|
||||||
|
error, but rather an HTTP timeout. This is an indication that the
|
||||||
|
server may have lost network connectivity. When Cloudflare detected that the
|
||||||
|
servers were down, it removed their A records from the
|
||||||
|
entry.nws.nickorlow.com domain. Since SMC Pennsylvania servers
|
||||||
|
have been undergoing maintenance since August 2023, this left no
|
||||||
|
servers able to serve requests routed to entry.nws.nickorlow.com,
|
||||||
|
resulting in the outage.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
SMC utilizes UptimeRobot for monitoring the uptime statistics of
|
||||||
|
services on SMC and SMC servers. This is the source of the
|
||||||
|
statistics shown on the SMC status page.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
UptimeRobot did not detect either of the Texas SMC servers as being
|
||||||
|
offline for the duration of the outage. This is odd, as UptimeRobot
|
||||||
|
and Cloudflare did not agree on the status of SMC servers. Logs
|
||||||
|
on SMC servers showed that requests from UptimeRobot were being
|
||||||
|
served while no requests from Cloudflare were shown in the logs.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
No firewall rules existed that could have blocked the healthcheck traffic from Cloudflare
|
||||||
|
for either of the SMC servers. There was no other configuration
|
||||||
|
found that would have blocked these requests. As these servers
|
||||||
|
are on different networks inside different buildings in different
|
||||||
|
parts of Texas, their networking equipment is entirely separate.
|
||||||
|
This rules out any failure of networking equipment owned
|
||||||
|
by SMC. This leads us to believe that the issue may have been
|
||||||
|
caused due to an internet traffic anomaly, although we are currently
|
||||||
|
unable to confirm that this is the cause of the issue.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
This is being actively investigated to find a more concrete root
|
||||||
|
cause. This postmortem will be updated if any new information is
|
||||||
|
found.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
A similar event occurred on November 12th, 2023 lasting for 2 seconds.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<h2>Fix</h2>
|
||||||
|
<p>
|
||||||
|
The common factor between both of these servers is that they both use
|
||||||
|
Spectrum for their ISP and that they are located near Austin, Texas.
|
||||||
|
The Pennsylvania server maintenance will be expedited so that we have
|
||||||
|
servers online that operate with no commonalities.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
SMC will also investigate other methods of failover and load
|
||||||
|
balancing.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>Last updated on November 16th, 2023</p>
|
9
templates/blogs/goodbye-nws.html
Normal file
9
templates/blogs/goodbye-nws.html
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
<h1>Goodbye, NWS</h1>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
<b>
|
||||||
|
Nick Web Services (NWS) is now Sharpe Mountain Compute (SMC).
|
||||||
|
</b>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>That is all</p>
|
2
templates/dashboard.html
Normal file
2
templates/dashboard.html
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
<h1>Under Construction</h1>
|
||||||
|
<p>The dashboard isn't ready yet! Use the <a href="https://nws.nickorlow.com/dashboard">old website</a> for now!</p>
|
30
templates/index.html
Normal file
30
templates/index.html
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
{%- import "uptime_table.html" as scope -%}
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<div style="display: flex; align-items: baseline;">
|
||||||
|
<h1 style="margin-bottom: 0px;">Sharpe Mountain Compute</h1>
|
||||||
|
<p style="margin-bottom: 0px; margin-left: 2px;">fka Nick Web Services</p>
|
||||||
|
</div>
|
||||||
|
<p style="margin-top: 0px;">Pottsville, PA - Philadelphia, PA - Austin, TX</p>
|
||||||
|
|
||||||
|
<a href="https://nws.nickorlow.com">[ Old Website (NWS Branded) ]</a>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
Sharpe Mountain Compute is a hosting service based out of the Commonwealth of Pennsylvania
|
||||||
|
and the State of Texas.
|
||||||
|
We are committed to achieving maximum uptime with better performance and a lower
|
||||||
|
cost than any of the major cloud services.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
We operate four datacenters located across three cities in two states. This infrastructure setup ensures redundancy and failover capabilities, minimizing downtime risks. Additionally, the geographical distribution enhances speed and accessibility, reducing latency for users across different regions.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p>
|
||||||
|
This has led to us maintaining four nines availability (99.9931% ; 38 minutes of downtime
|
||||||
|
all year) for 2023 and 100% uptime for 2024 (YTD).
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<h3>Compare us to our competitors!</h3>
|
||||||
|
{% call scope::uptime_table(uptime_infos) %}
|
||||||
|
</div>
|
38
templates/layout.html
Normal file
38
templates/layout.html
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
<head>
|
||||||
|
<title>Sharpe Mountain Compute</title>
|
||||||
|
<link rel="stylesheet" href="/assets/style.css">
|
||||||
|
<link rel="icon" type="image/x-icon" href="/assets/favicon.ico">
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<nav>
|
||||||
|
<div style="display: flex; justify-content: space-between;">
|
||||||
|
<div>
|
||||||
|
<a href="/">[ Home ]</a>
|
||||||
|
<a href="/system_status">[ System Status ]</a>
|
||||||
|
<a href="/blog">[ Blog ]</a>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<a href="/dashboard">[ Dashboard ]</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
{{ content|safe }}
|
||||||
|
|
||||||
|
<footer>
|
||||||
|
<hr />
|
||||||
|
<div style="display: flex; justify-content: space-between;">
|
||||||
|
<div>
|
||||||
|
<p style="margin-bottom: 0px; margin-top:0px;"><b>Sharpe Mountain Compute</b></p>
|
||||||
|
<p style="margin-bottom: 0px; margin-top:0px;"><i>formerly Nick Web Services (NWS)</i></p>
|
||||||
|
<p style="margin-bottom: 0px;margin-top: 0px;">Copyright © <a href="https://nickorlow.com">Nicholas Orlowsky</a> 2024</p>
|
||||||
|
<p style="margin-top: 0px;"><i>"We're getting there" - SEPTA</i></p>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<img class="flag-img" src="/assets/flag-images/us.png" title="The United States of America"/>
|
||||||
|
<img class="flag-img" src="/assets/flag-images/us-pa.png" title="The Commonwealth of Pennsylvania"/>
|
||||||
|
<img class="flag-img" src="/assets/flag-images/us-tx.png" title="The State of Texas"/>
|
||||||
|
</div>
|
||||||
|
</footer>
|
||||||
|
</body>
|
0
templates/single-blog.html
Normal file
0
templates/single-blog.html
Normal file
21
templates/system_status.html
Normal file
21
templates/system_status.html
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
{%- import "uptime_table.html" as scope -%}
|
||||||
|
|
||||||
|
<h1>System Status</h1>
|
||||||
|
|
||||||
|
<h2>Datacenter Status</h2>
|
||||||
|
<p>
|
||||||
|
The status of each of Sharpe Mountain Compute's 4
|
||||||
|
datacenters.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
{% call scope::uptime_table(dctr_uptime_infos) %}
|
||||||
|
|
||||||
|
<h2>Service Status</h2>
|
||||||
|
<p>
|
||||||
|
The status of services people host on Sharpe Mountain Compute.
|
||||||
|
Note that the uptime and performance of services hosted on
|
||||||
|
Sharpe Mountain Compute may be affected by factors not controlled by us such as
|
||||||
|
bad optimization or buggy software.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
{% call scope::uptime_table(svc_uptime_infos) %}
|
35
templates/uptime_table.html
Normal file
35
templates/uptime_table.html
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
{% macro uptime_table(uptime_infos) %}
|
||||||
|
<table style="width: 100%;">
|
||||||
|
<tr>
|
||||||
|
<th>Name</th>
|
||||||
|
<th>Uptime YTD</th>
|
||||||
|
<th>Response Time 24h</th>
|
||||||
|
<th>Current Status</th>
|
||||||
|
</tr>
|
||||||
|
{% for uptime_info in uptime_infos %}
|
||||||
|
<tr>
|
||||||
|
<td>
|
||||||
|
{% if let Some(click_url) = uptime_info.url %}
|
||||||
|
<a href="{{click_url}}">
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{{uptime_info.name}}
|
||||||
|
|
||||||
|
{% if let Some(click_url) = uptime_info.url %}
|
||||||
|
</a>
|
||||||
|
{% endif %}
|
||||||
|
</td>
|
||||||
|
<td>{{uptime_info.uptime}}</td>
|
||||||
|
<td>{{uptime_info.response_time}}</td>
|
||||||
|
<td
|
||||||
|
{% if uptime_info.status != "Up" %}
|
||||||
|
style="color: red;"
|
||||||
|
{% endif %}
|
||||||
|
>
|
||||||
|
{{uptime_info.status}}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
{% endfor %}
|
||||||
|
</table>
|
||||||
|
<p style="margin-top: 0px;"><i>Data current as of {{last_updated}}</i></p>
|
||||||
|
{% endmacro %}
|
Loading…
Reference in a new issue