diff --git a/internal/api/handlers/cloning_handler.go b/internal/api/handlers/cloning_handler.go
index 30d8a43..b3d5bf6 100644
--- a/internal/api/handlers/cloning_handler.go
+++ b/internal/api/handlers/cloning_handler.go
@@ -11,6 +11,7 @@ import (
 	"github.com/cpp-cyber/proclone/internal/ldap"
 	"github.com/cpp-cyber/proclone/internal/proxmox"
 	"github.com/cpp-cyber/proclone/internal/tools"
+	"github.com/cpp-cyber/proclone/internal/tools/sse"
 	"github.com/gin-contrib/sessions"
 	"github.com/gin-gonic/gin"
 )
@@ -88,16 +89,54 @@ func (ch *CloningHandler) CloneTemplateHandler(c *gin.Context) {
 		return
 	}
 
+	// Check for existing deployments before starting SSE
+	targetPoolName := fmt.Sprintf("%s_%s", req.Template, username)
+	isValid, err := ch.Service.ValidateCloneRequest(targetPoolName, username)
+	if err != nil {
+		log.Printf("Error validating deployment for user %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to validate existing deployments",
+			"details": err.Error(),
+		})
+		return
+	}
+	if !isValid {
+		log.Printf("Template %s is already deployed for user %s or they have exceeded deployment limits", req.Template, username)
+		c.JSON(http.StatusConflict, gin.H{
+			"error":   "Deployment not allowed",
+			"details": fmt.Sprintf("Template %s is already deployed for %s or they have exceeded the maximum of 5 deployed pods", req.Template, username),
+		})
+		return
+	}
+
+	// Create new sse object for streaming
+	sseWriter, err := sse.NewWriter(c.Writer)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to initialize SSE",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	sseWriter.Send(
+		cloning.ProgressMessage{
+			Message:  "Retrieving template information",
+			Progress: 0,
+		},
+	)
+
 	// Create the cloning request using the new format
 	cloneReq := cloning.CloneRequest{
 		Template:                 req.Template,
-		CheckExistingDeployments: true, // Check for existing deployments for single user clones
+		CheckExistingDeployments: false, // Already checked above
 		Targets: []cloning.CloneTarget{
 			{
 				Name:    username,
 				IsGroup: false,
 			},
 		},
+		SSE: sseWriter,
 	}
 
 	if err := ch.Service.CloneTemplate(cloneReq); err != nil {
@@ -144,16 +183,27 @@ func (ch *CloningHandler) AdminCloneTemplateHandler(c *gin.Context) {
 		})
 	}
 
+	// Create new sse object for streaming
+	sseWriter, err := sse.NewWriter(c.Writer)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to initialize SSE",
+			"details": err.Error(),
+		})
+		return
+	}
+
 	// Create clone request
 	cloneReq := cloning.CloneRequest{
 		Template:                 req.Template,
 		Targets:                  targets,
 		CheckExistingDeployments: false,
 		StartingVMID:             req.StartingVMID,
+		SSE:                      sseWriter,
 	}
 
 	// Perform clone operation
-	err := ch.Service.CloneTemplate(cloneReq)
+	err = ch.Service.CloneTemplate(cloneReq)
 	if err != nil {
 		log.Printf("Admin %s encountered error while bulk cloning template: %v", username, err)
 		c.JSON(http.StatusInternalServerError, gin.H{
@@ -264,7 +314,7 @@ func (ch *CloningHandler) GetPodsHandler(c *gin.Context) {
 
 	// Loop through the user's deployed pods and add template information
 	for i := range pods {
-		templateName := strings.Replace(pods[i].Name[5:], fmt.Sprintf("_%s", username), "", 1)
+		templateName := strings.Replace(strings.ToLower(pods[i].Name[5:]), fmt.Sprintf("_%s", strings.ToLower(username)), "", 1)
 		templateInfo, err := ch.Service.DatabaseService.GetTemplateInfo(templateName)
 		if err != nil {
 			log.Printf("Error retrieving template info for pod %s: %v", pods[i].Name, err)
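With these changes the single-user clone endpoint streams progress events instead of returning a single JSON body. Below is a minimal sketch of a client reading that stream; the URL, request body, and auth handling are assumptions for illustration, while the "data: <json>" framing and the message/progress fields come from the sse.Writer and ProgressMessage added later in this diff.

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// Mirrors cloning.ProgressMessage from this diff.
type progressMessage struct {
	Message  string `json:"message"`
	Progress int    `json:"progress"`
}

func main() {
	// Hypothetical route and body; the real route and auth are defined elsewhere in the project.
	resp, err := http.Post("https://proclone.example.com/api/templates/clone", "application/json",
		strings.NewReader(`{"template":"example-template"}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each event arrives as a "data: <json>" line followed by a blank separator line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		var msg progressMessage
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &msg); err != nil {
			continue
		}
		fmt.Printf("%3d%% %s\n", msg.Progress, msg.Message)
	}
}
```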
diff --git a/internal/api/handlers/dashboard_handler.go b/internal/api/handlers/dashboard_handler.go
index 8571840..62321b5 100644
--- a/internal/api/handlers/dashboard_handler.go
+++ b/internal/api/handlers/dashboard_handler.go
@@ -91,7 +91,7 @@ func (dh *DashboardHandler) GetUserDashboardStatsHandler(c *gin.Context) {
 
 	// Loop through the user's deployed pods and add template information
 	for i := range pods {
-		templateName := strings.Replace(pods[i].Name[5:], fmt.Sprintf("_%s", username), "", 1)
+		templateName := strings.Replace(strings.ToLower(pods[i].Name[5:]), fmt.Sprintf("_%s", strings.ToLower(username)), "", 1)
 		templateInfo, err := dh.cloningHandler.Service.DatabaseService.GetTemplateInfo(templateName)
 		if err != nil {
 			log.Printf("Error retrieving template info for pod %s: %v", pods[i].Name, err)
diff --git a/internal/api/middleware/authorization.go b/internal/api/middleware/authorization.go
index e173d39..6fb069f 100644
--- a/internal/api/middleware/authorization.go
+++ b/internal/api/middleware/authorization.go
@@ -63,12 +63,14 @@ func Logout(c *gin.Context) {
 
 func CORSMiddleware(fqdn string) gin.HandlerFunc {
 	return func(c *gin.Context) {
-		c.Writer.Header().Set("Content-Type", "application/json")
+		c.Writer.Header().Set("Content-Type", "application/json; text/event-stream")
 		c.Writer.Header().Set("Access-Control-Allow-Origin", fqdn)
 		c.Writer.Header().Set("Access-Control-Max-Age", "86400")
 		c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
 		c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
 		c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, Origin")
+		c.Writer.Header().Set("Cache-Control", "no-cache")
+		c.Writer.Header().Set("Connection", "keep-alive")
 
 		if c.Request.Method == "OPTIONS" {
 			c.AbortWithStatus(200)
diff --git a/internal/cloning/cloning_service.go b/internal/cloning/cloning_service.go
index 31c3d5d..346fd48 100644
--- a/internal/cloning/cloning_service.go
+++ b/internal/cloning/cloning_service.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"log"
 	"os"
-	"strings"
+	"regexp"
 	"time"
 
 	"github.com/cpp-cyber/proclone/internal/ldap"
@@ -85,11 +85,11 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 	// 3. Identify router and other VMs
 	var router *proxmox.VM
 	var templateVMs []proxmox.VM
+	routerPattern := regexp.MustCompile(`(?i)(router|pfsense|vyos)`)
 
 	for _, vm := range templatePool {
 		// Check to see if this VM is the router
-		lowerVMName := strings.ToLower(vm.Name)
-		if strings.Contains(lowerVMName, "router") || strings.Contains(lowerVMName, "pfsense") {
+		if routerPattern.MatchString(vm.Name) {
 			router = &proxmox.VM{
 				Name: vm.Name,
 				Node: vm.NodeName,
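The two substring checks are collapsed into one case-insensitive pattern that also recognizes VyOS routers. A quick sketch of its behavior, with invented VM names:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	routerPattern := regexp.MustCompile(`(?i)(router|pfsense|vyos)`)

	// The first three match regardless of case; the last one does not.
	for _, name := range []string{"Pod-Router", "PFSENSE-FW", "VyOS-Gateway", "ubuntu-desktop"} {
		fmt.Printf("%-15s %v\n", name, routerPattern.MatchString(name))
	}
}
```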
@@ -166,6 +166,13 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 	}
 
 	// 7. Clone targets to proxmox
+	req.SSE.Send(
+		ProgressMessage{
+			Message:  "Cloning VMs",
+			Progress: 10,
+		},
+	)
+
 	for _, target := range req.Targets {
 		// Find best node per target
 		bestNode, err := cs.ProxmoxService.FindBestNode()
@@ -186,9 +193,16 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 		if err != nil {
 			errors = append(errors, fmt.Sprintf("failed to clone router VM for %s: %v", target.Name, err))
 		} else {
+			// Determine router type
+			routerType, err := cs.getRouterType(*router)
+			if err != nil {
+				errors = append(errors, fmt.Sprintf("failed to get router type for %s: %v", target.Name, err))
+			}
+
 			// Store router info for later operations
 			clonedRouters = append(clonedRouters, RouterInfo{
 				TargetName: target.Name,
+				RouterType: routerType,
 				PodNumber:  target.PodNumber,
 				Node:       bestNode,
 				VMID:       target.VMIDs[0],
@@ -251,6 +265,12 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 	}
 
 	// 10. Start all routers and wait for them to be running
+	req.SSE.Send(
+		ProgressMessage{
+			Message:  "Starting routers",
+			Progress: 25,
+		},
+	)
 	log.Printf("Starting %d routers", len(clonedRouters))
 	for _, routerInfo := range clonedRouters {
 		// Wait for router disk to be available
@@ -278,6 +298,13 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 	}
 
 	// 11. Configure all pod routers (separate step after all routers are running)
+	req.SSE.Send(
+		ProgressMessage{
+			Message:  "Configuring pod routers",
+			Progress: 33,
+		},
+	)
+
 	log.Printf("Configuring %d pod routers", len(clonedRouters))
 	for _, routerInfo := range clonedRouters {
 		// Double-check that router is still running before configuration
@@ -288,12 +315,20 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 		}
 
 		log.Printf("Configuring pod router for %s (Pod: %d, VMID: %d)", routerInfo.TargetName, routerInfo.PodNumber, routerInfo.VMID)
-		err = cs.configurePodRouter(routerInfo.PodNumber, routerInfo.Node, routerInfo.VMID)
+		err = cs.configurePodRouter(routerInfo.PodNumber, routerInfo.Node, routerInfo.VMID, routerInfo.RouterType)
 		if err != nil {
 			errors = append(errors, fmt.Sprintf("failed to configure pod router for %s: %v", routerInfo.TargetName, err))
 		}
 	}
 
+	// Router configuration complete - update progress
+	req.SSE.Send(
+		ProgressMessage{
+			Message:  "Finalizing deployment",
+			Progress: 90,
+		},
+	)
+
 	// 12. Set permissions on the pool to the user/group
 	for _, target := range req.Targets {
 		err = cs.ProxmoxService.SetPoolPermission(target.PoolName, target.Name, target.IsGroup)
@@ -308,6 +343,14 @@ func (cs *CloningService) CloneTemplate(req CloneRequest) error {
 		errors = append(errors, fmt.Sprintf("failed to increment template deployments for %s: %v", req.Template, err))
 	}
 
+	// Final completion message
+	req.SSE.Send(
+		ProgressMessage{
+			Message:  "Template cloning completed!",
+			Progress: 100,
+		},
+	)
+
 	// Handle errors and cleanup if necessary
 	if len(errors) > 0 {
 		cs.cleanupFailedClones(createdPools)
@@ -357,9 +400,7 @@ func (cs *CloningService) DeletePod(pod string) error {
 					VMID: vm.VmId,
 				})
 				stoppedCount++
-			} else {
 			}
-		} else {
 		}
 	}
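CloneTemplate now calls req.SSE.Send unconditionally, and both handlers in this diff attach a writer before calling it. If a CloneRequest could ever be constructed without one, a small nil-guard helper would avoid a panic; this is a hypothetical sketch, not part of the patch:

```go
package cloning

// sendProgress is a hypothetical helper: it forwards a progress update only
// when a stream is attached, so callers without an SSE writer stay safe.
func (cs *CloningService) sendProgress(req CloneRequest, message string, progress int) {
	if req.SSE == nil {
		return
	}
	req.SSE.Send(ProgressMessage{
		Message:  message,
		Progress: progress,
	})
}
```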
diff --git a/internal/cloning/networking.go b/internal/cloning/networking.go
index 0cc56f3..c7475fa 100644
--- a/internal/cloning/networking.go
+++ b/internal/cloning/networking.go
@@ -5,13 +5,35 @@ import (
 	"log"
 	"math"
 	"regexp"
+	"strings"
 	"time"
 
+	"github.com/cpp-cyber/proclone/internal/proxmox"
 	"github.com/cpp-cyber/proclone/internal/tools"
 )
 
+func (cs *CloningService) getRouterType(router proxmox.VM) (string, error) {
+	infoReq := tools.ProxmoxAPIRequest{
+		Method:   "GET",
+		Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/config", router.Node, router.VMID),
+	}
+
+	infoRsp, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(infoReq)
+	if err != nil {
+		return "", fmt.Errorf("request for router type failed: %v", err)
+	}
+	switch {
+	case strings.Contains(string(infoRsp), "pfsense"):
+		return "pfsense", nil
+	case strings.Contains(string(infoRsp), "vyos"):
+		return "vyos", nil
+	default:
+		return "", fmt.Errorf("router type not defined")
+	}
+}
+
 // configurePodRouter configures the pod router with proper networking settings
-func (cs *CloningService) configurePodRouter(podNumber int, node string, vmid int) error {
+func (cs *CloningService) configurePodRouter(podNumber int, node string, vmid int, routerType string) error {
 	// Wait for router agent to be pingable
 	statusReq := tools.ProxmoxAPIRequest{
 		Method: "POST",
@@ -37,42 +59,68 @@ func (cs *CloningService) configurePodRouter(podNumber int, node string, vmid in
 		backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff)))
 	}
 
-	// Configure router WAN IP to have correct third octet using qemu agent API call
-	reqBody := map[string]any{
-		"command": []string{
-			cs.Config.WANScriptPath,
-			fmt.Sprintf("%s%d.1", cs.Config.WANIPBase, podNumber),
-		},
-	}
+	// Configure the router depending on its type
+	switch routerType {
+	case "pfsense":
+		// Configure router WAN IP to have correct third octet using qemu agent API call
+		reqBody := map[string]any{
+			"command": []string{
+				cs.Config.WANScriptPath,
+				fmt.Sprintf("%s%d.1", cs.Config.WANIPBase, podNumber),
+			},
+		}
 
-	execReq := tools.ProxmoxAPIRequest{
-		Method:      "POST",
-		Endpoint:    fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid),
-		RequestBody: reqBody,
-	}
+		execReq := tools.ProxmoxAPIRequest{
+			Method:      "POST",
+			Endpoint:    fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid),
+			RequestBody: reqBody,
+		}
 
-	_, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(execReq)
-	if err != nil {
-		return fmt.Errorf("failed to make IP change request: %v", err)
-	}
+		_, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(execReq)
+		if err != nil {
+			return fmt.Errorf("failed to make IP change request: %v", err)
+		}
 
-	// Send agent exec request to change VIP subnet
-	vipReqBody := map[string]any{
-		"command": []string{
-			cs.Config.VIPScriptPath,
-			fmt.Sprintf("%s%d.0", cs.Config.WANIPBase, podNumber),
-		},
-	}
+		// Send agent exec request to change VIP subnet
+		vipReqBody := map[string]any{
+			"command": []string{
+				cs.Config.VIPScriptPath,
+				fmt.Sprintf("%s%d.0", cs.Config.WANIPBase, podNumber),
+			},
+		}
 
-	vipExecReq := tools.ProxmoxAPIRequest{
-		Method:      "POST",
-		Endpoint:    fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid),
-		RequestBody: vipReqBody,
-	}
+		vipExecReq := tools.ProxmoxAPIRequest{
+			Method:      "POST",
+			Endpoint:    fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid),
+			RequestBody: vipReqBody,
+		}
 
-	_, err = cs.ProxmoxService.GetRequestHelper().MakeRequest(vipExecReq)
-	if err != nil {
-		return fmt.Errorf("failed to make VIP change request: %v", err)
+		_, err = cs.ProxmoxService.GetRequestHelper().MakeRequest(vipExecReq)
+		if err != nil {
+			return fmt.Errorf("failed to make VIP change request: %v", err)
+		}
+	case "vyos":
+		reqBody := map[string]any{
+			"command": []string{
+				"sh",
+				"-c",
+				fmt.Sprintf("sed -i -e 's/{{THIRD_OCTET}}/%d/g;s/{{NETWORK_PREFIX}}/%s/g' %s", podNumber, cs.Config.WANIPBase, cs.Config.VYOSScriptPath),
+			},
+		}
+
+		execReq := tools.ProxmoxAPIRequest{
+			Method:      "POST",
+			Endpoint:    fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid),
+			RequestBody: reqBody,
+		}
+
+		_, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(execReq)
+		if err != nil {
+			return fmt.Errorf("failed to make IP change request: %v", err)
+		}
+
+	default:
+		return fmt.Errorf("router type invalid")
 	}
 
 	return nil
@@ -91,7 +139,7 @@ func (cs *CloningService) SetPodVnet(poolName string, vnetName string) error {
 
 	log.Printf("Setting VNet %s for %d VMs in pool %s", vnetName, len(vms), poolName)
 
-	routerRegex := regexp.MustCompile(`(?i).*(router|pfsense).*`)
+	routerRegex := regexp.MustCompile(`(?i).*(router|pfsense|vyos).*`)
 
 	var errors []string
 	for _, vm := range vms {
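To make the VyOS branch concrete, here is the guest-agent command it builds, expanded with a made-up pod number and the default WAN_IP_BASE and VYOS_SCRIPT_PATH values added to the config further down in this diff:

```go
package main

import "fmt"

func main() {
	podNumber := 5                                                // hypothetical pod number
	wanIPBase := "172.16."                                        // WAN_IP_BASE default
	vyosScript := "/config/scripts/vyos-postconfig-bootup.script" // VYOS_SCRIPT_PATH default

	cmd := fmt.Sprintf("sed -i -e 's/{{THIRD_OCTET}}/%d/g;s/{{NETWORK_PREFIX}}/%s/g' %s",
		podNumber, wanIPBase, vyosScript)

	// Rewrites the placeholders in the router's boot script in place:
	// sed -i -e 's/{{THIRD_OCTET}}/5/g;s/{{NETWORK_PREFIX}}/172.16./g' /config/scripts/vyos-postconfig-bootup.script
	fmt.Println(cmd)
}
```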
fmt.Sprintf("%s%d.0", cs.Config.WANIPBase, podNumber), - }, - } + // Send agent exec request to change VIP subnet + vipReqBody := map[string]any{ + "command": []string{ + cs.Config.VIPScriptPath, + fmt.Sprintf("%s%d.0", cs.Config.WANIPBase, podNumber), + }, + } - vipExecReq := tools.ProxmoxAPIRequest{ - Method: "POST", - Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid), - RequestBody: vipReqBody, - } + vipExecReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid), + RequestBody: vipReqBody, + } - _, err = cs.ProxmoxService.GetRequestHelper().MakeRequest(vipExecReq) - if err != nil { - return fmt.Errorf("failed to make VIP change request: %v", err) + _, err = cs.ProxmoxService.GetRequestHelper().MakeRequest(vipExecReq) + if err != nil { + return fmt.Errorf("failed to make VIP change request: %v", err) + } + case "vyos": + reqBody := map[string]any{ + "command": []string{ + "sh", + "-c", + fmt.Sprintf("sed -i -e 's/{{THIRD_OCTET}}/%d/g;s/{{NETWORK_PREFIX}}/%s/g' %s", podNumber, cs.Config.WANIPBase, cs.Config.VYOSScriptPath), + }, + } + + execReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid), + RequestBody: reqBody, + } + + _, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(execReq) + if err != nil { + return fmt.Errorf("failed to make IP change request: %v", err) + } + + default: + return fmt.Errorf("router type invalid") } return nil @@ -91,7 +139,7 @@ func (cs *CloningService) SetPodVnet(poolName string, vnetName string) error { log.Printf("Setting VNet %s for %d VMs in pool %s", vnetName, len(vms), poolName) - routerRegex := regexp.MustCompile(`(?i).*(router|pfsense).*`) + routerRegex := regexp.MustCompile(`(?i).*(router|pfsense|vyos).*`) var errors []string for _, vm := range vms { diff --git a/internal/cloning/pods.go b/internal/cloning/pods.go index dad1518..0ef5880 100644 --- a/internal/cloning/pods.go +++ b/internal/cloning/pods.go @@ -23,7 +23,7 @@ func (cs *CloningService) GetPods(username string) ([]Pod, error) { // Build regex pattern to match username or any of their group names groupsWithUser := append(groups, username) - regexPattern := fmt.Sprintf(`1[0-9]{3}_.*_(%s)`, strings.Join(groupsWithUser, "|")) + regexPattern := fmt.Sprintf(`(?i)1[0-9]{3}_.*_(%s)`, strings.Join(groupsWithUser, "|")) // Get pods based on regex pattern pods, err := cs.MapVirtualResourcesToPods(regexPattern) @@ -87,11 +87,11 @@ func (cs *CloningService) ValidateCloneRequest(templateName string, username str for _, pod := range podPools { // Remove the Pod ID number and _ to compare - if !alreadyDeployed && pod.Name[5:] == templateName { + if !alreadyDeployed && strings.EqualFold(pod.Name[5:], templateName) { alreadyDeployed = true } - if strings.Contains(pod.Name, username) { + if strings.Contains(strings.ToLower(pod.Name), strings.ToLower(username)) { numDeployments++ } } diff --git a/internal/cloning/types.go b/internal/cloning/types.go index 50a2a45..abefd34 100644 --- a/internal/cloning/types.go +++ b/internal/cloning/types.go @@ -7,12 +7,13 @@ import ( "github.com/cpp-cyber/proclone/internal/ldap" "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/cpp-cyber/proclone/internal/tools/sse" "github.com/gin-gonic/gin" ) // Config holds the configuration for cloning operations type Config struct { - RouterName string `envconfig:"ROUTER_NAME" default:"1-1NAT-pfsense"` + RouterName string `envconfig:"ROUTER_NAME" default:"1-1NAT-vyos"` 
diff --git a/internal/cloning/types.go b/internal/cloning/types.go
index 50a2a45..abefd34 100644
--- a/internal/cloning/types.go
+++ b/internal/cloning/types.go
@@ -7,12 +7,13 @@ import (
 
 	"github.com/cpp-cyber/proclone/internal/ldap"
 	"github.com/cpp-cyber/proclone/internal/proxmox"
+	"github.com/cpp-cyber/proclone/internal/tools/sse"
 	"github.com/gin-gonic/gin"
 )
 
 // Config holds the configuration for cloning operations
 type Config struct {
-	RouterName string `envconfig:"ROUTER_NAME" default:"1-1NAT-pfsense"`
+	RouterName string `envconfig:"ROUTER_NAME" default:"1-1NAT-vyos"`
 	RouterVMID int    `envconfig:"ROUTER_VMID"`
 	RouterNode string `envconfig:"ROUTER_NODE"`
 	MinPodID   int    `envconfig:"MIN_POD_ID" default:"1001"`
@@ -22,6 +23,7 @@ type Config struct {
 	SDNApplyTimeout time.Duration `envconfig:"SDN_APPLY_TIMEOUT" default:"30s"`
 	WANScriptPath   string        `envconfig:"WAN_SCRIPT_PATH" default:"/home/update-wan-ip.sh"`
 	VIPScriptPath   string        `envconfig:"VIP_SCRIPT_PATH" default:"/home/update-wan-vip.sh"`
+	VYOSScriptPath  string        `envconfig:"VYOS_SCRIPT_PATH" default:"/config/scripts/vyos-postconfig-bootup.script"`
 	WANIPBase       string        `envconfig:"WAN_IP_BASE" default:"172.16."`
 }
 
@@ -116,11 +118,18 @@ type CloneRequest struct {
 	Targets                  []CloneTarget
 	CheckExistingDeployments bool // Whether to check if templates are already deployed
 	StartingVMID             int  // Optional starting VMID for admin clones
+	SSE                      *sse.Writer
 }
 
 type RouterInfo struct {
 	TargetName string
+	RouterType string
 	PodNumber  int
 	Node       string
 	VMID       int
 }
+
+type ProgressMessage struct {
+	Message  string `json:"message"`
+	Progress int    `json:"progress"`
+}
diff --git a/internal/proxmox/vms.go b/internal/proxmox/vms.go
index 6b2f3be..18f817a 100644
--- a/internal/proxmox/vms.go
+++ b/internal/proxmox/vms.go
@@ -140,9 +140,10 @@ func (s *ProxmoxService) WaitForDisk(node string, vmID int, maxWait time.Duratio
 	}
 
 	if configResp.HardDisk != "" {
+		//TODO/NOTE: Using static node "gonk" here because it seems to be the most reliable
 		pendingReq := tools.ProxmoxAPIRequest{
 			Method:   "GET",
-			Endpoint: fmt.Sprintf("/nodes/%s/storage/%s/content?vmid=%d", node, s.Config.StorageID, vmID),
+			Endpoint: fmt.Sprintf("/nodes/gonk/storage/%s/content?vmid=%d", s.Config.StorageID, vmID),
 		}
 
 		var diskResponse []PendingDiskResponse
diff --git a/internal/tools/sse/sse.go b/internal/tools/sse/sse.go
new file mode 100644
index 0000000..18a7e0b
--- /dev/null
+++ b/internal/tools/sse/sse.go
@@ -0,0 +1,26 @@
+package sse
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+type Writer struct {
+	w http.ResponseWriter
+	f http.Flusher
+}
+
+func NewWriter(w http.ResponseWriter) (*Writer, error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		return nil, fmt.Errorf("streaming unsupported")
+	}
+	return &Writer{w: w, f: f}, nil
+}
+
+func (s *Writer) Send(message any) {
+	b, _ := json.Marshal(message)
+	fmt.Fprintf(s.w, "data: %s\n\n", b)
+	s.f.Flush()
+}
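Finally, a standalone sketch of the Writer's wire format. It assumes compilation inside the proclone module, since internal packages cannot be imported from outside; httptest.ResponseRecorder implements http.Flusher, so NewWriter accepts it. Note that Send ignores json.Marshal errors, which is harmless for the flat ProgressMessage struct but worth keeping in mind for other payloads.

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/cpp-cyber/proclone/internal/tools/sse"
)

func main() {
	rec := httptest.NewRecorder() // satisfies http.Flusher

	w, err := sse.NewWriter(rec) // fails only when the ResponseWriter cannot flush
	if err != nil {
		panic(err)
	}

	// Map keys marshal in sorted order, mirroring ProgressMessage's JSON shape.
	w.Send(map[string]any{"message": "Cloning VMs", "progress": 10})

	// Prints: data: {"message":"Cloning VMs","progress":10}
	// followed by a blank line, which is what delimits SSE events for the browser.
	fmt.Print(rec.Body.String())
}
```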