diff --git a/Cargo.toml b/Cargo.toml
index 5d1c0eb..5c41f5e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -76,6 +76,8 @@ uuid = { version = "1.9", features = ["serde", "v4"] }
 ignore.workspace = true
 base64.workspace = true
 tokio-util.workspace = true
+sha1 = { version = "0.10", optional = true }
+hex = { version = "0.4", optional = true }
 
 [workspace.dependencies]
 pretty_assertions = "1.4"
@@ -105,6 +107,7 @@ tempfile = "3.8"
 [features]
 proxy = ["dep:pingora"]
 errorlogs = []
+vercel = ["dep:sha1", "dep:hex"]
 
 # The profile that 'dist' will build with
 [profile.dist]
diff --git a/src/adapters/backend/mod.rs b/src/adapters/backend/mod.rs
index 3c615e3..f58400e 100644
--- a/src/adapters/backend/mod.rs
+++ b/src/adapters/backend/mod.rs
@@ -625,6 +625,11 @@ pub enum IngressConfig {
         account_id: String,
         worker_name: String,
     },
+    #[cfg(feature = "vercel")]
+    Vercel {
+        project_name: String,
+        team_id: Option<String>,
+    },
 }
 
 #[derive(Clone, Debug, Serialize)]
@@ -645,6 +650,12 @@ pub enum MonitorConfig {
         account_id: String,
         worker_name: String,
     },
+    #[cfg(feature = "vercel")]
+    Vercel {
+        api_token: String,
+        project_name: String,
+        team_id: Option<String>,
+    },
 }
 
 #[derive(Clone, Debug, Serialize)]
@@ -658,6 +669,11 @@ pub enum PlatformConfig {
         account_id: String,
         worker_name: String,
     },
+    #[cfg(feature = "vercel")]
+    Vercel {
+        project_name: String,
+        team_id: Option<String>,
+    },
 }
 
 #[derive(Builder)]
diff --git a/src/adapters/ingresses/mod.rs b/src/adapters/ingresses/mod.rs
index 601aa3b..d10d779 100644
--- a/src/adapters/ingresses/mod.rs
+++ b/src/adapters/ingresses/mod.rs
@@ -9,6 +9,8 @@ pub type BoxedIngress = Box<dyn Ingress>;
 
 pub(crate) use apig::AwsApiGateway;
 pub(crate) use cloudflare::CloudflareWorkerIngress;
+#[cfg(feature = "vercel")]
+pub(crate) use vercel::VercelIngress;
 
 use super::backend::IngressConfig;
@@ -47,6 +49,8 @@ pub trait Ingress: Shutdownable {
 
 mod apig;
 mod cloudflare;
+#[cfg(feature = "vercel")]
+mod vercel;
 
 #[cfg(test)]
 mod tests {
diff --git a/src/adapters/ingresses/vercel.rs b/src/adapters/ingresses/vercel.rs
new file mode 100644
index 0000000..a2ab56b
--- /dev/null
+++ b/src/adapters/ingresses/vercel.rs
@@ -0,0 +1,136 @@
+#[cfg(feature = "vercel")]
+use crate::{
+    Shutdownable, WholePercent,
+    adapters::{
+        backend::IngressConfig,
+        vercel::VercelClient as Client,
+    },
+    subsystems::ShutdownResult,
+};
+
+#[cfg(feature = "vercel")]
+use super::Ingress;
+#[cfg(feature = "vercel")]
+use async_trait::async_trait;
+#[cfg(feature = "vercel")]
+use derive_getters::Getters;
+#[cfg(feature = "vercel")]
+use miette::Result;
+#[cfg(feature = "vercel")]
+use tracing::{debug, info};
+
+#[cfg(feature = "vercel")]
+#[derive(Getters)]
+pub struct VercelIngress {
+    client: Client,
+    // The deployment ID of the baseline version
+    baseline_deployment_id: Option<String>,
+    // The deployment ID of the canary version
+    canary_deployment_id: Option<String>,
+}
+
+#[cfg(feature = "vercel")]
+impl VercelIngress {
+    pub fn new(client: Client) -> Self {
+        Self {
+            client,
+            baseline_deployment_id: None,
+            canary_deployment_id: None,
+        }
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Ingress for VercelIngress {
+    fn get_config(&self) -> IngressConfig {
+        IngressConfig::Vercel {
+            project_name: self.client.project_name().clone(),
+            team_id: self.client.team_id().clone(),
+        }
+    }
+
+    async fn release_canary(
+        &mut self,
+        baseline_deployment_id: String,
+        canary_deployment_id: String,
+    ) -> Result<()> {
+        debug!("Releasing canary in Vercel!");
+
+        // Save the deployment IDs
+        self.baseline_deployment_id = Some(baseline_deployment_id.clone());
+        self.canary_deployment_id = Some(canary_deployment_id.clone());
+
+        // Note: Vercel doesn't have built-in canary traffic splitting, so this is
+        // a placeholder implementation. In a real implementation, you would:
+        // 1. Use Vercel's Edge Config or a similar feature for traffic splitting
+        // 2. Configure a middleware to route traffic based on percentages
+        // 3. Or use Vercel's deployment promotion API
+
+        info!("Canary deployment created: {}", canary_deployment_id);
+        info!("Baseline deployment: {}", baseline_deployment_id);
+
+        Ok(())
+    }
+
+    async fn set_canary_traffic(&mut self, percent: WholePercent) -> Result<()> {
+        info!("Setting Vercel canary traffic to {percent} (placeholder implementation)");
+
+        // Note: Vercel doesn't natively support percentage-based traffic splitting
+        // like Cloudflare Workers. This would require:
+        // 1. Setting up Edge Config or Edge Middleware
+        // 2. Implementing custom traffic routing logic
+        // 3. Using Vercel's API to update the configuration
+
+        // For now, this is a placeholder
+        debug!(
+            "Would route {}% traffic to canary: {:?}",
+            percent.as_i32(),
+            self.canary_deployment_id
+        );
+
+        Ok(())
+    }
+
+    async fn rollback_canary(&mut self) -> Result<()> {
+        info!("Rolling back canary in Vercel (placeholder implementation)");
+
+        // In a real implementation, this would:
+        // 1. Remove the canary deployment from production
+        // 2. Ensure 100% of traffic goes to the baseline
+        // 3. Update Edge Config or middleware settings
+
+        self.canary_deployment_id = None;
+
+        Ok(())
+    }
+
+    async fn promote_canary(&mut self) -> Result<()> {
+        info!("Promoting canary in Vercel!");
+
+        // In a real implementation, this would:
+        // 1. Make the canary deployment the production deployment
+        // 2. Update DNS/routing to point to the canary
+        // 3. Use Vercel's API to promote the deployment
+
+        self.canary_deployment_id = None;
+
+        Ok(())
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Shutdownable for VercelIngress {
+    async fn shutdown(&mut self) -> ShutdownResult {
+        // If there's no canary deployment ID set, there's nothing to roll back.
+        if self.canary_deployment_id.is_none() {
+            debug!("No canary deployment ID set, nothing to roll back.");
+            return Ok(());
+        }
+
+        self.rollback_canary().await?;
+        Ok(())
+    }
+}
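The placeholder comments in `set_canary_traffic` above point at an Edge Config–driven split. A minimal sketch of what the API call could look like, assuming the rollout weight lives in a hypothetical Edge Config item named `canary_percent` and that the `PATCH /v1/edge-config/{id}/items` endpoint is used (both are assumptions to verify against Vercel's Edge Config API, and an Edge Middleware that reads the value is not shown):

```rust
use miette::{IntoDiagnostic, Result, miette};
use reqwest::Client;
use serde_json::json;

/// Sketch only: push the desired canary percentage into an Edge Config item
/// that a Vercel Edge Middleware would read to route requests.
/// `edge_config_id` and the item key are hypothetical, not part of this PR.
async fn set_canary_weight(
    client: &Client,
    api_token: &str,
    edge_config_id: &str,
    percent: u8,
) -> Result<()> {
    let url = format!("https://api.vercel.com/v1/edge-config/{edge_config_id}/items");
    let body = json!({
        "items": [{ "operation": "upsert", "key": "canary_percent", "value": percent }]
    });

    let response = client
        .patch(url)
        .bearer_auth(api_token)
        .json(&body)
        .send()
        .await
        .into_diagnostic()?;

    if !response.status().is_success() {
        return Err(miette!("Failed to update Edge Config: {}", response.status()));
    }
    Ok(())
}
```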
diff --git a/src/adapters/mod.rs b/src/adapters/mod.rs
index 6ff6097..9a07d7d 100644
--- a/src/adapters/mod.rs
+++ b/src/adapters/mod.rs
@@ -1,6 +1,8 @@
 pub use backend::BackendClient;
 pub(crate) use backend::{LockedState, RolloutMetadata};
 pub use cloudflare::CloudflareClient;
+#[cfg(feature = "vercel")]
+pub use vercel::VercelClient;
 
 pub use ingresses::*;
 pub use monitors::*;
@@ -9,6 +11,9 @@ pub use platforms::*;
 pub mod backend;
 /// MultiTool's Cloudflare HTTP client.
 mod cloudflare;
+/// MultiTool's Vercel HTTP client.
+#[cfg(feature = "vercel")]
+mod vercel;
 /// Contains the trait definition and ingress implementations. Ingresses are responsible
 /// for actuating changes to traffic.
 mod ingresses;
diff --git a/src/adapters/monitors/mod.rs b/src/adapters/monitors/mod.rs
index 812c471..ef94784 100644
--- a/src/adapters/monitors/mod.rs
+++ b/src/adapters/monitors/mod.rs
@@ -13,6 +13,8 @@ pub type StatusCode = CategoricalObservation<5, ResponseStatusCode>;
 
 pub use cloudflare::CloudflareMonitor;
 pub use cloudwatch::CloudWatch;
+#[cfg(feature = "vercel")]
+pub use vercel::VercelMonitor;
 
 use super::backend::MonitorConfig;
@@ -60,3 +62,5 @@ pub trait Monitor: Shutdownable {
 
 mod cloudflare;
 mod cloudwatch;
+#[cfg(feature = "vercel")]
+mod vercel;
diff --git a/src/adapters/monitors/vercel.rs b/src/adapters/monitors/vercel.rs
new file mode 100644
index 0000000..f8ae286
--- /dev/null
+++ b/src/adapters/monitors/vercel.rs
@@ -0,0 +1,98 @@
+#[cfg(feature = "vercel")]
+use async_trait::async_trait;
+#[cfg(feature = "vercel")]
+use chrono::{DateTime, Utc};
+#[cfg(feature = "vercel")]
+use derive_getters::Getters;
+#[cfg(feature = "vercel")]
+use tracing::info;
+
+#[cfg(feature = "vercel")]
+use crate::{
+    Shutdownable,
+    adapters::{backend::MonitorConfig, vercel::VercelClient as Client},
+    metrics::ResponseStatusCode,
+    stats::{CategoricalObservation, Group},
+    subsystems::ShutdownResult,
+};
+#[cfg(feature = "vercel")]
+use miette::Result;
+
+#[cfg(feature = "vercel")]
+use super::Monitor;
+
+#[cfg(feature = "vercel")]
+#[derive(Getters)]
+pub struct VercelMonitor {
+    client: Client,
+    // The deployment ID of the baseline version
+    baseline_deployment_id: Option<String>,
+    // The deployment ID of the canary version
+    canary_deployment_id: Option<String>,
+    // The time we started querying
+    _start_time: DateTime<Utc>,
+}
+
+#[cfg(feature = "vercel")]
+impl VercelMonitor {
+    pub fn new(client: Client) -> Self {
+        Self {
+            client,
+            baseline_deployment_id: None,
+            canary_deployment_id: None,
+            _start_time: Utc::now(),
+        }
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Monitor for VercelMonitor {
+    type Item = CategoricalObservation<5, ResponseStatusCode>;
+
+    fn get_config(&self) -> MonitorConfig {
+        MonitorConfig::Vercel {
+            api_token: self.client.api_token().clone(),
+            project_name: self.client.project_name().clone(),
+            team_id: self.client.team_id().clone(),
+        }
+    }
+
+    async fn query(&mut self) -> Result<Vec<Self::Item>> {
+        info!("Querying Vercel for metrics (placeholder implementation)");
+
+        // Note: Vercel's analytics API requires a paid plan, so this is a
+        // placeholder implementation that returns empty metrics.
+        // In a real implementation, you would:
+        // 1. Query Vercel's analytics API for each deployment
+        // 2. Parse the response status codes
+        // 3. Create CategoricalObservations for baseline and canary
+
+        let metrics = Vec::new();
+
+        // TODO: Implement actual Vercel analytics API integration.
+        // This would require calling Vercel's analytics endpoints
+        // and parsing the response data.
+
+        Ok(metrics)
+    }
+
+    async fn set_canary_version_id(&mut self, canary_version_id: String) -> Result<()> {
+        self.canary_deployment_id = Some(canary_version_id);
+        Ok(())
+    }
+
+    async fn set_baseline_version_id(&mut self, baseline_version_id: String) -> Result<()> {
+        self.baseline_deployment_id = Some(baseline_version_id);
+        Ok(())
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Shutdownable for VercelMonitor {
+    async fn shutdown(&mut self) -> ShutdownResult {
+        // When we get the shutdown signal, we stop querying
+        Ok(())
+    }
+}
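If the analytics integration sketched in `query` above is ever filled in, raw status codes have to be folded into the five classes behind `CategoricalObservation<5, ResponseStatusCode>`. A self-contained sketch of that bucketing step; the enum here is a stand-in, since the crate's real `ResponseStatusCode` type and the observation constructors are not shown in this PR:

```rust
/// Sketch: collapse raw HTTP status codes into the five coarse classes the
/// monitor reports on. The crate already defines `ResponseStatusCode`; this
/// stand-alone enum only illustrates the mapping.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum StatusClass {
    Informational, // 1xx
    Successful,    // 2xx
    Redirection,   // 3xx
    ClientError,   // 4xx
    ServerError,   // 5xx
}

fn classify(status: u16) -> Option<StatusClass> {
    match status {
        100..=199 => Some(StatusClass::Informational),
        200..=299 => Some(StatusClass::Successful),
        300..=399 => Some(StatusClass::Redirection),
        400..=499 => Some(StatusClass::ClientError),
        500..=599 => Some(StatusClass::ServerError),
        _ => None,
    }
}

/// Count how many responses fall into each class, per deployment; counts like
/// these are what a `CategoricalObservation<5, _>` would be built from.
fn tally(statuses: &[u16]) -> [u32; 5] {
    let mut counts = [0u32; 5];
    for &status in statuses {
        if let Some(class) = classify(status) {
            counts[class as usize] += 1;
        }
    }
    counts
}
```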
diff --git a/src/adapters/platforms/mod.rs b/src/adapters/platforms/mod.rs
index e7c9a6a..5fe69f5 100644
--- a/src/adapters/platforms/mod.rs
+++ b/src/adapters/platforms/mod.rs
@@ -7,6 +7,8 @@ pub type BoxedPlatform = Box<dyn Platform>;
 
 pub(crate) use cloudflare::CloudflareWorkerPlatform;
 pub(crate) use lambda::LambdaPlatform;
+#[cfg(feature = "vercel")]
+pub(crate) use vercel::VercelPlatform;
 
 use super::backend::PlatformConfig;
@@ -36,6 +38,8 @@ impl Shutdownable for MockPlatform {
 
 mod cloudflare;
 mod lambda;
+#[cfg(feature = "vercel")]
+mod vercel;
 
 #[cfg(test)]
 mod tests {
diff --git a/src/adapters/platforms/vercel.rs b/src/adapters/platforms/vercel.rs
new file mode 100644
index 0000000..076b15f
--- /dev/null
+++ b/src/adapters/platforms/vercel.rs
@@ -0,0 +1,103 @@
+#[cfg(feature = "vercel")]
+use std::path::PathBuf;
+
+#[cfg(feature = "vercel")]
+use crate::{
+    Shutdownable,
+    adapters::{backend::PlatformConfig, vercel::VercelClient as Client},
+    artifacts::VercelFileManifest,
+    subsystems::ShutdownResult,
+};
+
+#[cfg(feature = "vercel")]
+use super::Platform;
+#[cfg(feature = "vercel")]
+use async_trait::async_trait;
+#[cfg(feature = "vercel")]
+use derive_getters::Getters;
+#[cfg(feature = "vercel")]
+use miette::Result;
+#[cfg(feature = "vercel")]
+use tracing::info;
+
+#[cfg(feature = "vercel")]
+#[derive(Getters)]
+pub struct VercelPlatform {
+    client: Client,
+    project_dir: PathBuf,
+    project_name: String,
+}
+
+#[cfg(feature = "vercel")]
+impl VercelPlatform {
+    pub fn new(client: Client, project_dir: PathBuf, project_name: String) -> Self {
+        Self {
+            client,
+            project_dir,
+            project_name,
+        }
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Platform for VercelPlatform {
+    fn get_config(&self) -> PlatformConfig {
+        PlatformConfig::Vercel {
+            project_name: self.project_name.clone(),
+            team_id: self.client.team_id().clone(),
+        }
+    }
+
+    async fn deploy(&mut self) -> Result<(String, String)> {
+        info!("Deploying to Vercel!");
+
+        // Get the current deployment ID as the baseline
+        let baseline_version_id = self.client.get_current_deployment().await?;
+
+        // 1. Create a manifest of the files to upload, with SHA1 hashes
+        let file_manifest = VercelFileManifest::new(&self.project_dir).await?;
+
+        // 2. Upload the files in parallel (semaphore-bounded, WaitGroup-style)
+        self.client.upload_files(&file_manifest).await?;
+
+        // 3. Create the deployment
+        let deployment_response = self
+            .client
+            .create_deployment(&file_manifest, &self.project_name)
+            .await?;
+
+        info!("Vercel deployment created: {}", deployment_response.url);
+
+        // Return the baseline and new canary deployment IDs
+        Ok((baseline_version_id, deployment_response.id))
+    }
+
+    async fn yank_canary(&mut self) -> Result<()> {
+        // For Vercel, we handle traffic through the ingress layer.
+        // The platform doesn't need to yank the canary.
+        Ok(())
+    }
+
+    async fn delete_canary(&mut self) -> Result<()> {
+        // Vercel deployments are immutable and can remain;
+        // we don't need to delete them.
+        Ok(())
+    }
+
+    async fn promote_rollout(&mut self) -> Result<()> {
+        // For Vercel, promotion is handled through the ingress layer
+        // by updating which deployment receives production traffic.
+        Ok(())
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[async_trait]
+impl Shutdownable for VercelPlatform {
+    async fn shutdown(&mut self) -> ShutdownResult {
+        // When we get the shutdown signal, we don't need to do anything:
+        // Vercel deployments persist.
+        Ok(())
+    }
+}
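Taken together, the platform and ingress above split a rollout into deploy and routing steps. A sketch of how a caller might drive them end to end; the wiring is illustrative only (the real orchestration lives in the existing rollout subsystems), and it assumes `VercelPlatform`, `VercelIngress`, and `WholePercent` from this crate are in scope:

```rust
use miette::Result;

/// Illustrative only: drive a Vercel rollout with the adapters from this PR.
/// `traffic_steps` is built by the caller, since WholePercent's constructor
/// isn't shown here.
async fn run_vercel_rollout(
    mut platform: VercelPlatform,
    mut ingress: VercelIngress,
    traffic_steps: Vec<WholePercent>,
) -> Result<()> {
    // Upload the project and create a new (canary) deployment.
    let (baseline_id, canary_id) = platform.deploy().await?;

    // Tell the ingress about both deployments.
    ingress.release_canary(baseline_id, canary_id).await?;

    // Gradually shift traffic toward the canary.
    for step in traffic_steps {
        ingress.set_canary_traffic(step).await?;
        // ...watch the monitor between steps and call rollback_canary() on regression...
    }

    // Make the canary the new baseline.
    ingress.promote_canary().await?;
    platform.promote_rollout().await?;
    Ok(())
}
```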
diff --git a/src/adapters/vercel/mod.rs b/src/adapters/vercel/mod.rs
new file mode 100644
index 0000000..c8fdfdb
--- /dev/null
+++ b/src/adapters/vercel/mod.rs
@@ -0,0 +1,277 @@
+#[cfg(feature = "vercel")]
+use derive_getters::Getters;
+#[cfg(feature = "vercel")]
+use miette::{IntoDiagnostic, Result, miette};
+#[cfg(feature = "vercel")]
+use reqwest::header::{AUTHORIZATION, CONTENT_TYPE, HeaderMap, HeaderValue};
+#[cfg(feature = "vercel")]
+use reqwest::Client;
+#[cfg(feature = "vercel")]
+use serde::{Deserialize, Serialize};
+#[cfg(feature = "vercel")]
+use std::sync::Arc;
+#[cfg(feature = "vercel")]
+use tokio::sync::Semaphore;
+#[cfg(feature = "vercel")]
+use tracing::{debug, info};
+#[cfg(feature = "vercel")]
+use url::Url;
+
+#[cfg(feature = "vercel")]
+use crate::artifacts::VercelFileManifest;
+
+#[cfg(feature = "vercel")]
+#[derive(Clone, Getters)]
+pub struct VercelClient {
+    client: Client,
+    /// The Vercel API token
+    api_token: String,
+    /// The Vercel team ID (optional)
+    team_id: Option<String>,
+    /// The project name
+    project_name: String,
+}
+
+#[cfg(feature = "vercel")]
+impl VercelClient {
+    pub fn new(api_token: String, project_name: String, team_id: Option<String>) -> Self {
+        let mut default_headers = HeaderMap::new();
+        let auth = format!("Bearer {}", api_token);
+        let mut auth_value = HeaderValue::from_str(&auth).expect("Must be able to set header");
+        auth_value.set_sensitive(true);
+        default_headers.insert(AUTHORIZATION, auth_value);
+
+        let client = Client::builder()
+            .default_headers(default_headers)
+            .build()
+            .expect("Must be able to construct client");
+
+        Self {
+            client,
+            api_token,
+            team_id,
+            project_name,
+        }
+    }
+
+    fn base_url() -> &'static str {
+        "https://api.vercel.com"
+    }
+
+    /// Upload files to Vercel in parallel, WaitGroup-style: one task per file,
+    /// bounded by a semaphore, joined at the end.
+    /// Each file is uploaded individually with its SHA1 hash.
+    pub async fn upload_files(&self, manifest: &VercelFileManifest) -> Result<()> {
+        info!("Uploading {} files to Vercel in parallel", manifest.files().len());
+
+        // Create a semaphore to limit concurrent uploads (max 10 at a time)
+        let semaphore = Arc::new(Semaphore::new(10));
+        let mut upload_tasks = Vec::new();
+
+        for file_entry in manifest.files() {
+            let permit = semaphore.clone();
+            let client = self.client.clone();
+            let file_path = file_entry.path().to_path_buf();
+            let sha1 = file_entry.sha1().to_string();
+            let team_id = self.team_id.clone();
+
+            let task = tokio::spawn(async move {
+                let _permit = permit.acquire().await.unwrap();
+
+                debug!("Uploading file: {:?} with SHA1: {}", file_path, sha1);
+
+                // Read file contents
+                let file_bytes = tokio::fs::read(&file_path).await.into_diagnostic()?;
+
+                // Build the upload URL
+                let mut url = Url::parse(&format!("{}/v2/now/files", Self::base_url()))
+                    .into_diagnostic()?;
+
+                if let Some(team_id) = team_id {
+                    url.query_pairs_mut().append_pair("teamId", &team_id);
+                }
+
+                // Create headers
+                let mut headers = HeaderMap::new();
+                headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/octet-stream"));
+                headers.insert("x-vercel-digest", HeaderValue::from_str(&sha1).into_diagnostic()?);
+                headers.insert("Content-Length", HeaderValue::from_str(&file_bytes.len().to_string()).into_diagnostic()?);
+
+                // Upload the file
+                let response = client
+                    .post(url)
+                    .headers(headers)
+                    .body(file_bytes)
+                    .send()
+                    .await
+                    .into_diagnostic()?;
+
+                if !response.status().is_success() {
+                    let error_text = response.text().await.into_diagnostic()?;
+                    return Err(miette!(
+                        "Failed to upload file {:?}. Error: {}",
+                        file_path,
+                        error_text
+                    ));
+                }
+
+                debug!("Successfully uploaded file: {:?}", file_path);
+                Ok::<(), miette::Report>(())
+            });
+
+            upload_tasks.push(task);
+        }
+
+        // Wait for all uploads to complete (WaitGroup-style join)
+        let results = futures_util::future::join_all(upload_tasks).await;
+
+        // Check if any uploads failed
+        for result in results {
+            result.into_diagnostic()??;
+        }
+
+        info!("All files uploaded successfully");
+        Ok(())
+    }
+    /// Create a deployment after the files have been uploaded
+    pub async fn create_deployment(
+        &self,
+        manifest: &VercelFileManifest,
+        project_name: &str,
+    ) -> Result<CreateDeploymentResponse> {
+        debug!("Creating Vercel deployment");
+
+        let mut url = Url::parse(&format!("{}/v13/deployments", Self::base_url()))
+            .into_diagnostic()?;
+
+        if let Some(team_id) = &self.team_id {
+            url.query_pairs_mut().append_pair("teamId", team_id);
+        }
+
+        // Build the files array with SHA1 hashes
+        let files: Vec<DeploymentFile> = manifest
+            .files()
+            .iter()
+            .map(|entry| {
+                let file_path = entry.path();
+                let path_str = file_path
+                    .to_str()
+                    .unwrap_or("")
+                    .to_string();
+
+                DeploymentFile {
+                    file: path_str,
+                    sha: entry.sha1().to_string(),
+                    size: 0, // Vercel doesn't strictly require size in the API
+                }
+            })
+            .collect();
+
+        let request = CreateDeploymentRequest {
+            name: project_name.to_string(),
+            files,
+            project_settings: None,
+            target: Some("production".to_string()),
+        };
+
+        let response = self
+            .client
+            .post(url)
+            .json(&request)
+            .send()
+            .await
+            .into_diagnostic()?;
+
+        if !response.status().is_success() {
+            let error_text = response.text().await.into_diagnostic()?;
+            return Err(miette!("Failed to create deployment. Error: {}", error_text));
+        }
+
+        let deployment_response = response
+            .json::<CreateDeploymentResponse>()
+            .await
+            .into_diagnostic()?;
+
+        debug!("Deployment created successfully");
+        Ok(deployment_response)
+    }
+
+    /// Get the current production deployment for the project
+    pub async fn get_current_deployment(&self) -> Result<String> {
+        debug!("Getting current Vercel deployment");
+
+        let mut url = Url::parse(&format!(
+            "{}/v9/projects/{}",
+            Self::base_url(),
+            self.project_name
+        ))
+        .into_diagnostic()?;
+
+        if let Some(team_id) = &self.team_id {
+            url.query_pairs_mut().append_pair("teamId", team_id);
+        }
+
+        let response = self.client.get(url).send().await.into_diagnostic()?;
+
+        if !response.status().is_success() {
+            return Err(miette!("Failed to get current deployment"));
+        }
+
+        let project_response = response
+            .json::<ProjectResponse>()
+            .await
+            .into_diagnostic()?;
+
+        // Return the latest production deployment ID
+        project_response
+            .targets
+            .and_then(|t| t.production)
+            .and_then(|p| p.id)
+            .ok_or_else(|| miette!("No production deployment found"))
+    }
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeploymentFile {
+    pub file: String,
+    pub sha: String,
+    pub size: u64,
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateDeploymentRequest {
+    pub name: String,
+    pub files: Vec<DeploymentFile>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub project_settings: Option<serde_json::Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub target: Option<String>,
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateDeploymentResponse {
+    pub id: String,
+    pub url: String,
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ProjectResponse {
+    pub targets: Option<ProjectTargets>,
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ProjectTargets {
+    pub production: Option<DeploymentTarget>,
+}
+
+#[cfg(feature = "vercel")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeploymentTarget {
+    pub id: Option<String>,
+}
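`upload_files` above bounds concurrency with an `Arc<Semaphore>` and a `join_all` over raw `JoinHandle`s. The same shape can be expressed with `tokio::task::JoinSet`, which drains results as tasks finish; a minimal, self-contained sketch of that alternative in isolation (`upload_one` is a stand-in for the per-file request, and the error type is simplified to `String`):

```rust
use std::sync::Arc;
use tokio::{sync::Semaphore, task::JoinSet};

/// Stand-in for the per-file upload performed in `upload_files`.
async fn upload_one(path: std::path::PathBuf) -> Result<(), String> {
    // ...POST the file bytes with their SHA1 digest...
    let _ = path;
    Ok(())
}

/// Sketch: the same bounded-concurrency upload loop, expressed with JoinSet.
async fn upload_all(paths: Vec<std::path::PathBuf>) -> Result<(), String> {
    let semaphore = Arc::new(Semaphore::new(10));
    let mut tasks = JoinSet::new();

    for path in paths {
        let permit = Arc::clone(&semaphore);
        tasks.spawn(async move {
            let _permit = permit.acquire().await.expect("semaphore closed");
            upload_one(path).await
        });
    }

    // Drain results as tasks complete, surfacing the first failure.
    while let Some(joined) = tasks.join_next().await {
        joined.map_err(|e| e.to_string())??;
    }
    Ok(())
}
```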
diff --git a/src/artifacts/mod.rs b/src/artifacts/mod.rs
index 08ebf6b..7224669 100644
--- a/src/artifacts/mod.rs
+++ b/src/artifacts/mod.rs
@@ -5,8 +5,12 @@ use tokio::fs::File;
 use tokio::io::AsyncReadExt;
 
 mod cloudflare;
+#[cfg(feature = "vercel")]
+mod vercel;
 
 pub(crate) use cloudflare::CloudflareFileManifest;
+#[cfg(feature = "vercel")]
+pub(crate) use vercel::VercelFileManifest;
 
 pub struct LambdaZip(Vec<u8>);
diff --git a/src/artifacts/vercel/manifest.rs b/src/artifacts/vercel/manifest.rs
new file mode 100644
index 0000000..886b6e9
--- /dev/null
+++ b/src/artifacts/vercel/manifest.rs
@@ -0,0 +1,134 @@
+use std::path::{Path, PathBuf};
+
+use derive_getters::Getters;
+use ignore::{
+    Walk, WalkBuilder,
+    types::{Types, TypesBuilder},
+};
+use miette::{IntoDiagnostic as _, Result, miette};
+use tracing::debug;
+
+use crate::fs::manifest::manifest_filenames;
+
+#[cfg(feature = "vercel")]
+use sha1::{Sha1, Digest};
+
+#[derive(Clone, Debug)]
+pub(crate) struct VercelFileEntry {
+    path: PathBuf,
+    sha1: String,
+}
+
+impl VercelFileEntry {
+    pub fn path(&self) -> &Path {
+        &self.path
+    }
+
+    pub fn sha1(&self) -> &str {
+        &self.sha1
+    }
+}
+
+#[derive(Getters, Clone, Debug)]
+pub(crate) struct VercelFileManifest {
+    files: Vec<VercelFileEntry>,
+}
+
+impl VercelFileManifest {
+    /// Build a new Manifest using the given root directory.
+    /// Calculates the SHA1 hash for each file.
+    #[cfg(feature = "vercel")]
+    pub async fn new<P: AsRef<Path>>(root: P) -> Result<Self> {
+        debug!("Building Vercel manifest with SHA1 hashes");
+        let directory = root.as_ref().to_path_buf();
+
+        // We must be given a valid directory.
+        if !directory.metadata().is_ok_and(|meta| meta.is_dir()) {
+            return Err(miette!(format!(
+                "The provided path `{}` is not a valid directory",
+                directory.display()
+            )));
+        }
+
+        let mut files = Vec::new();
+        let manifest_filenames = manifest_filenames();
+
+        // Build the file tree walker.
+        let walker = walk_builder(directory.clone());
+
+        for entry in walker {
+            debug!("Processing entry: {:?}", entry.as_ref().ok().map(|e| e.path()));
+            let file_entry = entry.into_diagnostic()?;
+
+            // Ignore directories; we only want files.
+            if file_entry.file_type().map_or(false, |ft| ft.is_dir()) {
+                continue;
+            }
+
+            let file_path = file_entry.path().to_path_buf();
+
+            // Skip files whose names match manifest filenames
+            if let Some(filename) = file_path.file_name().and_then(|n| n.to_str()) {
+                if manifest_filenames.contains(&filename.to_string()) {
+                    continue;
+                }
+            }
+
+            // Calculate the SHA1 hash
+            let file_bytes = tokio::fs::read(&file_path).await.into_diagnostic()?;
+            let mut hasher = Sha1::new();
+            hasher.update(&file_bytes);
+            let hash_result = hasher.finalize();
+            let sha1 = hex::encode(hash_result);
+
+            files.push(VercelFileEntry {
+                path: file_path,
+                sha1,
+            });
+        }
+
+        debug!(
+            "Finished building Vercel manifest with {} files",
+            files.len()
+        );
+
+        // A manifest with no files should be an error
+        if files.is_empty() {
+            return Err(miette!(format!(
+                "No files found in directory `{}` to upload",
+                directory.display()
+            )));
+        }
+
+        Ok(Self { files })
+    }
+
+    #[cfg(not(feature = "vercel"))]
+    pub async fn new<P: AsRef<Path>>(_root: P) -> Result<Self> {
+        Err(miette!("Vercel feature is not enabled. Enable the 'vercel' feature flag to use Vercel functionality."))
+    }
+}
+
+/// Build a file loader that loads web development files.
+fn types_matches() -> Types {
+    let mut builder = TypesBuilder::new();
+    builder.add_defaults();
+    builder
+        .select("ts")
+        .select("js")
+        .select("css")
+        .select("html")
+        .select("json");
+    builder.build().unwrap()
+}
+
+/// Build the Walker that walks the file tree looking for files.
+/// It obeys the type filters we created.
+fn walk_builder(dir: PathBuf) -> Walk {
+    let types = types_matches();
+    WalkBuilder::new(dir)
+        .standard_filters(true)
+        .parents(false)
+        .types(types)
+        .build()
+}
diff --git a/src/artifacts/vercel/mod.rs b/src/artifacts/vercel/mod.rs
new file mode 100644
index 0000000..6cffd32
--- /dev/null
+++ b/src/artifacts/vercel/mod.rs
@@ -0,0 +1,3 @@
+mod manifest;
+
+pub(crate) use manifest::VercelFileManifest;
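The manifest above reads each file fully into memory before hashing, which is fine for typical web assets. If large artifacts ever become a concern, the same hex SHA1 can be computed in chunks under the same `sha1`/`hex` dependencies; a minimal sketch of that streaming variant (chunk size is arbitrary):

```rust
use sha1::{Digest, Sha1};
use tokio::io::AsyncReadExt;

/// Sketch: compute the same hex SHA1 the manifest stores, without buffering
/// the whole file in memory.
async fn sha1_of_file(path: &std::path::Path) -> std::io::Result<String> {
    let mut file = tokio::fs::File::open(path).await?;
    let mut hasher = Sha1::new();
    let mut buf = vec![0u8; 64 * 1024];

    loop {
        let read = file.read(&mut buf).await?;
        if read == 0 {
            break;
        }
        hasher.update(&buf[..read]);
    }

    Ok(hex::encode(hasher.finalize()))
}
```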
diff --git a/src/config/run/mod.rs b/src/config/run/mod.rs
index 421f64e..054cad3 100644
--- a/src/config/run/mod.rs
+++ b/src/config/run/mod.rs
@@ -49,6 +49,17 @@ pub struct RunSubcommand {
     /// The path to the artifact to upload to AWS.
     #[arg(long, env = "AWS_ARTIFACT_PATH")]
     aws_artifact_path: Option<PathBuf>,
+
+    /// Vercel Config
+    /// The API token to use for Vercel API requests.
+    #[arg(long, env = "VERCEL_API_TOKEN")]
+    vercel_api_token: Option<String>,
+    /// The Vercel team ID (optional)
+    #[arg(long, env = "VERCEL_TEAM_ID")]
+    vercel_team_id: Option<String>,
+    /// The path to the Vercel project directory.
+    #[arg(long, env = "VERCEL_PROJECT_DIR")]
+    vercel_project_dir: Option<PathBuf>,
 }
 
 impl RunSubcommand {
@@ -92,6 +103,18 @@ impl RunSubcommand {
         self.aws_artifact_path.as_deref()
     }
 
+    pub fn vercel_api_token(&self) -> Option<&str> {
+        self.vercel_api_token.as_deref()
+    }
+
+    pub fn vercel_team_id(&self) -> Option<&str> {
+        self.vercel_team_id.as_deref()
+    }
+
+    pub fn vercel_project_dir(&self) -> Option<&Path> {
+        self.vercel_project_dir.as_deref()
+    }
+
     pub fn workspace(&self) -> Option<&str> {
         self.workspace.as_deref()
     }
diff --git a/src/fs/manifest/schema.rs b/src/fs/manifest/schema.rs
index aa899ba..5fdd27d 100644
--- a/src/fs/manifest/schema.rs
+++ b/src/fs/manifest/schema.rs
@@ -15,6 +15,8 @@ use crate::{
     config::RunSubcommand,
     fs::{FileSystem, wrangler::Wrangler},
 };
+#[cfg(feature = "vercel")]
+use crate::adapters::{VercelClient, VercelIngress, VercelMonitor, VercelPlatform};
 use miette::Result;
 
 /// The application manifest only needs to be loaded once, so we
@@ -156,6 +158,8 @@ impl ConfigSection {
 pub enum MonitorConfig {
     AwsCloudwatch(AwsCloudwatch),
     CloudflareObservability(CloudflareConfig),
+    #[cfg(feature = "vercel")]
+    Vercel(VercelConfig),
 }
 
 impl MonitorConfig {
@@ -171,6 +175,8 @@ impl MonitorConfig {
             MonitorConfig::CloudflareObservability(cloudflare_observability) => {
                 cloudflare_observability.load_monitor(args)
            }
+            #[cfg(feature = "vercel")]
+            MonitorConfig::Vercel(vercel) => vercel.load_monitor(args),
         }
     }
 }
@@ -242,6 +248,8 @@ impl AwsCloudwatch {
 pub enum IngressConfig {
     AwsApiGateway(AwsApiGatewayConfig),
     CloudflareWorkers(CloudflareConfig),
+    #[cfg(feature = "vercel")]
+    Vercel(VercelConfig),
 }
 
 impl IngressConfig {
@@ -249,6 +257,8 @@ impl IngressConfig {
         match self {
             IngressConfig::AwsApiGateway(config) => config.load_ingress(args).await,
             IngressConfig::CloudflareWorkers(config) => config.load_ingress(args),
+            #[cfg(feature = "vercel")]
+            IngressConfig::Vercel(config) => config.load_ingress(args),
         }
     }
 }
@@ -282,6 +292,8 @@ impl AwsApiGatewayConfig {
 pub enum PlatformConfig {
     AwsLambda(AwsLambdaConfig),
     CloudflareWorkers(CloudflareConfig),
+    #[cfg(feature = "vercel")]
+    Vercel(VercelConfig),
 }
 
 impl PlatformConfig {
@@ -289,6 +301,8 @@ impl PlatformConfig {
         match self {
             PlatformConfig::AwsLambda(config) => config.load_platform(args).await,
             PlatformConfig::CloudflareWorkers(config) => config.load_platform(args),
+            #[cfg(feature = "vercel")]
+            PlatformConfig::Vercel(config) => config.load_platform(args),
         }
     }
 }
@@ -420,6 +434,87 @@ impl CloudflareConfig {
     }
 }
 
+#[cfg(feature = "vercel")]
+#[derive(Clone, Default, Deserialize, Serialize, PartialEq, Eq, Debug)]
+#[serde(rename_all = "kebab-case")]
+pub struct VercelConfig {
+    project_name: String,
+    project_dir: Option<String>,
+    team_id: Option<String>,
+
+    /// We always get this value from the command line.
+    #[serde(skip)]
+    api_token: Option<String>,
+}
+
+#[cfg(feature = "vercel")]
+impl VercelConfig {
+    fn load_api_token(&self, args: &RunSubcommand) -> Result<String> {
+        args
+            .vercel_api_token()
+            .map(ToString::to_string)
+            .or_else(|| std::env::var("VERCEL_API_TOKEN").ok())
+            .ok_or_else(|| miette!("No Vercel API token was provided. Either set the VERCEL_API_TOKEN environment variable, or pass it via the --vercel-api-token CLI flag."))
+    }
+
+    fn load_team_id(&self, args: &RunSubcommand) -> Option<String> {
+        args.vercel_team_id()
+            .map(ToString::to_string)
+            .or_else(|| self.team_id.clone())
+            .or_else(|| std::env::var("VERCEL_TEAM_ID").ok())
+    }
+
+    fn load_project_dir(&self, fs: &FileSystem, args: &RunSubcommand) -> Result<PathBuf> {
+        if let Some(path) = args.vercel_project_dir() {
+            return Ok(path.to_path_buf());
+        }
+
+        if let Some(path) = &self.project_dir {
+            return Ok(PathBuf::from(path));
+        }
+
+        // Finally, default to the current working directory.
+        let current_dir = match fs.application_dir() {
+            Err(err) => Err(err),
+            Ok(Some(path)) => Ok(path),
+            Ok(None) => std::env::current_dir()
+                .map_err(|e| miette!("Failed to get current directory: {}", e)),
+        }?;
+
+        Ok(current_dir)
+    }
+
+    fn load_ingress(&self, args: &RunSubcommand) -> Result<BoxedIngress> {
+        let api_token = self.load_api_token(args)?;
+        let team_id = self.load_team_id(args);
+        let client = VercelClient::new(api_token, self.project_name.clone(), team_id);
+
+        Ok(Box::new(VercelIngress::new(client)))
+    }
+
+    fn load_monitor(&self, args: &RunSubcommand) -> Result<BoxedMonitor> {
+        let api_token = self.load_api_token(args)?;
+        let team_id = self.load_team_id(args);
+        let client = VercelClient::new(api_token, self.project_name.clone(), team_id);
+
+        Ok(Box::new(VercelMonitor::new(client)))
+    }
+
+    fn load_platform(&self, args: &RunSubcommand) -> Result<BoxedPlatform> {
+        let fs = FileSystem::new()?;
+        let api_token = self.load_api_token(args)?;
+        let team_id = self.load_team_id(args);
+        let project_dir = self.load_project_dir(&fs, args)?;
+        let client = VercelClient::new(api_token.clone(), self.project_name.clone(), team_id);
+
+        Ok(Box::new(VercelPlatform::new(
+            client,
+            project_dir,
+            self.project_name.clone(),
+        )))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::manifest::CloudflareConfig;