From 969b177def472954e146da796eb3af4555254715 Mon Sep 17 00:00:00 2001 From: Harry Kodden Date: Thu, 11 Sep 2025 10:04:30 +0200 Subject: [PATCH 1/2] add role s3 --- roles/s3/README.md | 110 ++++++++++++ roles/s3/defaults/main.yml | 44 +++++ roles/s3/handlers/main.yml | 11 ++ roles/s3/meta/main.yml | 32 ++++ roles/s3/tasks/main.yml | 168 +++++++++++++++++ roles/s3/templates/.env.j2 | 19 ++ roles/s3/templates/docker-compose.yml.j2 | 198 +++++++++++++++++++++ roles/s3/templates/generate_htpasswd.sh.j2 | 20 +++ roles/s3/templates/nginx.conf.j2 | 62 +++++++ roles/s3/templates/s3.toml.j2 | 45 +++++ roles/s3/templates/seaweedfs-s3.service.j2 | 27 +++ roles/s3/templates/seaweedfs_s3.toml.j2 | 57 ++++++ roles/s3/vars/Debian.yml | 19 ++ roles/s3/vars/RedHat.yml | 19 ++ 14 files changed, 831 insertions(+) create mode 100644 roles/s3/README.md create mode 100644 roles/s3/defaults/main.yml create mode 100644 roles/s3/handlers/main.yml create mode 100644 roles/s3/meta/main.yml create mode 100644 roles/s3/tasks/main.yml create mode 100644 roles/s3/templates/.env.j2 create mode 100644 roles/s3/templates/docker-compose.yml.j2 create mode 100644 roles/s3/templates/generate_htpasswd.sh.j2 create mode 100644 roles/s3/templates/nginx.conf.j2 create mode 100644 roles/s3/templates/s3.toml.j2 create mode 100644 roles/s3/templates/seaweedfs-s3.service.j2 create mode 100644 roles/s3/templates/seaweedfs_s3.toml.j2 create mode 100644 roles/s3/vars/Debian.yml create mode 100644 roles/s3/vars/RedHat.yml diff --git a/roles/s3/README.md b/roles/s3/README.md new file mode 100644 index 000000000..54e8e23fd --- /dev/null +++ b/roles/s3/README.md @@ -0,0 +1,110 @@ +# S3 Role + +This role sets up an S3-compatible object storage cluster using SeaweedFS. + +## Overview + +The role implements a distributed SeaweedFS cluster with the following components: + +- Master server for coordination +- Multiple volume servers for storage +- Filer servers for file metadata and directory structure +- S3 API gateway +- Nginx proxy for routing and authentication +- Management API for administration + +## Requirements + +- Docker and Docker Compose must be installed on the target machine +- Python with docker and docker-compose modules for Ansible + +## Configuration + +### Main Variables + +| Variable | Description | Default | +|----------|-------------|---------| +| `s3_base_dir` | Base directory for S3 installation | `/opt/s3` | +| `s3_config_dir` | Directory for configuration files | `/opt/s3/config` | +| `s3_data_dir` | Directory for data storage | `/opt/s3/data` | +| `s3_log_dir` | Directory for log files | `/opt/s3/logs` | +| `s3_host_address` | Host address for the S3 service | `localhost` | +| `s3_domain_name` | Domain name for the S3 service | `s3.example.com` | +| `s3_admin_key` | AWS access key for the admin user | `AKIAIOSFODNN7EXAMPLE` | +| `s3_admin_secret` | AWS secret key for the admin user | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` | +| `s3_auth_user` | Username for HTTP Basic Auth | `admin` | +| `s3_auth_password` | Password for HTTP Basic Auth | `password` | +| `s3_region_name` | AWS region name | `us-east-1` | +| `s3_docker_network_name` | Docker network name | `seaweedfs_network` | +| `s3_volume_count` | Number of volume servers | `3` | +| `s3_filer_count` | Number of filer servers | `1` | + +### Resource Limits + +| Variable | Description | Default | +|----------|-------------|---------| +| `s3_master_memory_limit` | Memory limit for master server | `1G` | +| `s3_master_cpu_limit` | CPU limit for master server | `1.0` 
| +| `s3_volume_memory_limit` | Memory limit for volume servers | `1G` | +| `s3_volume_cpu_limit` | CPU limit for volume servers | `1.0` | +| `s3_filer_memory_limit` | Memory limit for filer servers | `1G` | +| `s3_filer_cpu_limit` | CPU limit for filer servers | `1.0` | +| `s3_api_memory_limit` | Memory limit for API server | `512M` | +| `s3_api_cpu_limit` | CPU limit for API server | `0.5` | +| `s3_proxy_memory_limit` | Memory limit for Nginx proxy | `256M` | +| `s3_proxy_cpu_limit` | CPU limit for Nginx proxy | `0.2` | + +### S3 Configuration + +| Variable | Description | Default | +|----------|-------------|---------| +| `s3_address_style` | S3 addressing style | `path-style` | +| `s3_signature_version` | S3 signature version | `v4` | +| `s3_disable_bucket_policies` | Disable bucket policies | `false` | +| `s3_enable_head_dir_object` | Enable HeadDirObject API | `false` | +| `s3_enable_cors` | Enable CORS support | `true` | +| `s3_include_etag` | Include ETag in responses | `true` | +| `s3_multipart_upload_limits_mib` | Maximum multipart uploads in memory | `10000` | +| `s3_max_body_size` | Maximum upload size in Nginx | `500M` | + +## Usage + +Include the role in your playbook: + +```yaml +- hosts: s3_servers + roles: + - role: s3 + vars: + s3_host_address: "s3.example.com" + s3_admin_key: "your-access-key" + s3_admin_secret: "your-secret-key" +``` + +## Accessing the S3 Service + +After deployment, the following services will be available: + +- S3 API endpoint: http://s3_host_address/ +- Master admin UI: http://s3_host_address/master/ +- Filer admin UI: http://s3_host_address/filer/ +- Volume admin UIs: http://s3_host_address/volume1/, http://s3_host_address/volume2/, etc. +- Management API: http://s3_host_address/api/ + +Admin UIs require authentication using the configured `s3_auth_user` and `s3_auth_password`. + +## Using with AWS S3 CLI + +Example configuration: + +```bash +aws configure set aws_access_key_id +aws configure set aws_secret_access_key +aws configure set default.region +aws configure set default.s3.addressing_style path +aws --endpoint-url=http://s3_host_address s3 ls +``` + +## License + +This role is based on the [SeaWeedFS-HA-Demo](https://github.com/HarryKodden/SeaWeedFS-HA-Demo) repository. 
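The AWS CLI snippet in the README above lost its placeholder arguments in formatting, so as a supplement here is a minimal smoke test in Python. This is a sketch, not part of the role: it assumes boto3 is installed, uses the documented example credentials from the defaults table, and substitutes a placeholder hostname for `s3_host_address` (the nginx proxy forwards `/` on port 80 to the S3 gateway).

```python
# Minimal smoke test against the S3 endpoint exposed by the nginx proxy.
# Assumptions: boto3 is installed; hostname and credentials below are the
# role's documented example values / placeholders, not real secrets.
import boto3
from botocore.config import Config

s3 = boto3.client(
    "s3",
    endpoint_url="http://s3.example.com",  # http://<s3_host_address>/ (proxy, port 80)
    aws_access_key_id="AKIAIOSFODNN7EXAMPLE",  # s3_admin_key
    aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",  # s3_admin_secret
    region_name="us-east-1",  # s3_region_name
    config=Config(signature_version="s3v4", s3={"addressing_style": "path"}),
)

# Create a bucket, write an object, read it back, then list buckets.
s3.create_bucket(Bucket="smoke-test")
s3.put_object(Bucket="smoke-test", Key="hello.txt", Body=b"hello")
print(s3.get_object(Bucket="smoke-test", Key="hello.txt")["Body"].read())
print([b["Name"] for b in s3.list_buckets()["Buckets"]])
```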
diff --git a/roles/s3/defaults/main.yml b/roles/s3/defaults/main.yml new file mode 100644 index 000000000..1b3fb9758 --- /dev/null +++ b/roles/s3/defaults/main.yml @@ -0,0 +1,44 @@ +--- +# Default variables for the S3 role + +# Base configuration +s3_base_dir: /opt/seaweedfs +s3_config_dir: "{{ s3_base_dir }}/config" +s3_data_dir: "{{ s3_base_dir }}/data" +s3_log_dir: "/var/log/seaweedfs" + +# Authentication +s3_auth_user: admin +s3_auth_password: "{{ lookup('password', '/tmp/s3_auth_password chars=ascii_letters,digits length=16') }}" + +# S3 API credentials +s3_admin_key: "{{ lookup('password', '/tmp/s3_access_key_id chars=ascii_letters,digits length=20') }}" +s3_admin_secret: "{{ lookup('password', '/tmp/s3_secret_access_key chars=ascii_letters,digits length=40') }}" +s3_region_name: "us-east-1" +s3_signature_version: "s3v4" +s3_addressing_style: "path" + +# Network configuration +s3_host_address: "{{ ansible_default_ipv4.address }}" +s3_domain_name: "{{ ansible_domain }}" + +# Cluster configuration +s3_master_count: 3 +s3_volume_count: 3 +s3_filer_count: 2 + +# Resource limits +s3_master_memory_limit: "1G" +s3_volume_memory_limit: "2G" +s3_filer_memory_limit: "2G" +s3_proxy_memory_limit: "512M" +s3_api_memory_limit: "512M" + +s3_master_cpu_limit: "0.5" +s3_volume_cpu_limit: "1.0" +s3_filer_cpu_limit: "1.0" +s3_proxy_cpu_limit: "0.5" +s3_api_cpu_limit: "0.5" + +# Docker configuration +s3_docker_network_name: "seaweedfs_network" diff --git a/roles/s3/handlers/main.yml b/roles/s3/handlers/main.yml new file mode 100644 index 000000000..55e77d73d --- /dev/null +++ b/roles/s3/handlers/main.yml @@ -0,0 +1,11 @@ +--- +# Handlers for the S3 role + +- name: reload systemd + systemd: + daemon_reload: yes + +- name: restart s3-cluster + systemd: + name: s3-cluster + state: restarted diff --git a/roles/s3/meta/main.yml b/roles/s3/meta/main.yml new file mode 100644 index 000000000..867c1de2e --- /dev/null +++ b/roles/s3/meta/main.yml @@ -0,0 +1,32 @@ +--- +galaxy_info: + role_name: s3 + author: OpenConext + description: Deploys SeaweedFS S3 cluster + company: OpenConext + license: Apache-2.0 + min_ansible_version: "2.9" + + platforms: + - name: EL + versions: + - "7" + - "8" + - "9" + - name: Debian + versions: + - all + - name: Ubuntu + versions: + - all + + galaxy_tags: + - seaweedfs + - s3 + - storage + - filesystem + - distributed + - cloud + +dependencies: +- role: docker diff --git a/roles/s3/tasks/main.yml b/roles/s3/tasks/main.yml new file mode 100644 index 000000000..5da3d61cc --- /dev/null +++ b/roles/s3/tasks/main.yml @@ -0,0 +1,168 @@ +--- +# S3 role using SeaweedFS +# Based on https://github.com/HarryKodden/SeaWeedFS-HA-Demo + +- name: Include OS-specific variables + include_vars: "{{ ansible_os_family }}.yml" + tags: + - s3 + - s3-install + +- name: Install required packages + package: + name: "{{ s3_packages }}" + state: present + tags: + - s3 + - s3-install + +- name: Ensure required services are running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ s3_services }}" + tags: + - s3 + - s3-install + +- name: Create required directories + file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: 755 + with_items: + - "{{ s3_base_dir }}" + - "{{ s3_config_dir }}" + - "{{ s3_data_dir }}" + - "{{ s3_log_dir }}" + tags: + - s3 + - s3-install + +- name: Set up environment configuration file + template: + src: .env.j2 + dest: "{{ s3_config_dir }}/.env" + owner: root + group: root + mode: 644 + tags: + - s3 + - s3-install + - s3-config 
+ +- name: Set up S3 configuration file + template: + src: seaweedfs_s3.toml.j2 + dest: "{{ s3_config_dir }}/s3.toml" + owner: root + group: root + mode: 644 + tags: + - s3 + - s3-install + - s3-config + +- name: Set up Docker Compose file + template: + src: docker-compose.yml.j2 + dest: "{{ s3_base_dir }}/docker-compose.yml" + owner: root + group: root + mode: 644 + tags: + - s3 + - s3-install + - s3-config + +- name: Set up Nginx configuration + template: + src: nginx.conf.j2 + dest: "{{ s3_config_dir }}/nginx.conf" + owner: root + group: root + mode: 644 + tags: + - s3 + - s3-install + - s3-config + +- name: Set up htpasswd script + template: + src: generate_htpasswd.sh.j2 + dest: "{{ s3_config_dir }}/generate_htpasswd.sh" + owner: root + group: root + mode: 755 + tags: + - s3 + - s3-install + - s3-config + +- name: Create .htpasswd for basic authentication + command: "{{ s3_config_dir }}/generate_htpasswd.sh {{ s3_auth_user }} {{ s3_auth_password }}" + args: + creates: "{{ s3_config_dir }}/.htpasswd" + tags: + - s3 + - s3-install + - s3-config + +- name: Pull required Docker images + docker_image: + name: "{{ item }}" + source: pull + with_items: + - nginx:alpine + - python:3.9-slim + - chrislusf/seaweedfs:latest + tags: + - s3 + - s3-install + +- name: Start SeaweedFS S3 cluster + docker_compose: + project_src: "{{ s3_base_dir }}" + state: present + tags: + - s3 + - s3-service + +- name: Create systemd service for S3 + template: + src: seaweedfs-s3.service.j2 + dest: /etc/systemd/system/s3-cluster.service + owner: root + group: root + mode: 644 + notify: reload systemd + tags: + - s3 + - s3-service + +- name: Enable S3 service + systemd: + name: s3-cluster + enabled: yes + state: started + tags: + - s3 + - s3-service + +- name: Wait for S3 cluster to be ready + uri: + url: "http://{{ s3_host_address }}:8080/api/health" + method: GET + status_code: 200 + user: "{{ s3_auth_user }}" + password: "{{ s3_auth_password }}" + register: result + until: result.status == 200 + retries: 10 + delay: 5 + tags: + - s3 + - s3-service diff --git a/roles/s3/templates/.env.j2 b/roles/s3/templates/.env.j2 new file mode 100644 index 000000000..61ba57180 --- /dev/null +++ b/roles/s3/templates/.env.j2 @@ -0,0 +1,19 @@ +# AWS S3 Configuration +AWS_ACCESS_KEY_ID={{ s3_admin_key }} +AWS_SECRET_ACCESS_KEY={{ s3_admin_secret }} +AWS_DEFAULT_REGION={{ s3_region_name | default('us-east-1') }} +AWS_ENDPOINT_URL=http://{{ s3_host_address }}:8333 + +# S3 API Configuration +S3_ENDPOINT=http://{{ s3_host_address }}:8333 +S3_REGION={{ s3_region_name | default('us-east-1') }} + +# SeaweedFS Service URLs +SEAWEED_MASTER_URL=http://{{ s3_host_address }}:9333 +SEAWEED_VOLUME_URL=http://{{ s3_host_address }}:8080 +SEAWEED_FILER_URL=http://{{ s3_host_address }}:8888 +SEAWEED_S3_URL=http://{{ s3_host_address }}:8333 + +# Admin credentials for the HTTP Basic Auth +ADMIN_USER={{ s3_admin_user | default('admin') }} +ADMIN_PASSWORD={{ s3_admin_password | default('password') }} diff --git a/roles/s3/templates/docker-compose.yml.j2 b/roles/s3/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..c9c25757c --- /dev/null +++ b/roles/s3/templates/docker-compose.yml.j2 @@ -0,0 +1,198 @@ +version: '3.8' + +networks: + {{ s3_docker_network_name }}: + driver: bridge + +volumes: + master_data: + {% for i in range(1, s3_volume_count + 1) %} + volume{{ i }}_data: + {% endfor %} + filer_data: + etcd_data: + +# Common configurations using YAML anchors +x-common-vars: &common-vars + restart: always + networks: + - {{ 
s3_docker_network_name }} + +x-common-master: &common-master + <<: *common-vars + image: chrislusf/seaweedfs:latest + depends_on: + - etcd + command: ["-v=1", "master", "-mdir=/data", "-ip={{ s3_host_address }}", "-port=9333", "-peers={{ s3_host_address }}:9333", "-defaultReplication=001"] + deploy: + resources: + limits: + memory: {{ s3_master_memory_limit }} + cpus: {{ s3_master_cpu_limit }} + +x-common-volume: &common-volume + <<: *common-vars + image: chrislusf/seaweedfs:latest + command: ["-v=1", "volume", "-dir=/data", "-max=0", "-mserver={{ s3_host_address }}:9333", "-port=8080"] + deploy: + resources: + limits: + memory: {{ s3_volume_memory_limit }} + cpus: {{ s3_volume_cpu_limit }} + +x-common-filer: &common-filer + <<: *common-vars + image: chrislusf/seaweedfs:latest + depends_on: + - master1 + - volume1 + command: ["-v=1", "filer", "-master={{ s3_host_address }}:9333", "-port=8888", "-s3"] + deploy: + resources: + limits: + memory: {{ s3_filer_memory_limit }} + cpus: {{ s3_filer_cpu_limit }} + +services: + etcd: + <<: *common-vars + image: quay.io/coreos/etcd:v3.5.0 + command: ["/usr/local/bin/etcd", "--name", "etcd", "--data-dir", "/data", "--listen-client-urls", "http://0.0.0.0:2379", "--advertise-client-urls", "http://0.0.0.0:2379", "--listen-peer-urls", "http://0.0.0.0:2380", "--initial-advertise-peer-urls", "http://0.0.0.0:2380", "--initial-cluster", "etcd=http://0.0.0.0:2380", "--initial-cluster-token", "seaweedfs-token", "--initial-cluster-state", "new"] + volumes: + - etcd_data:/data + ports: + - "2379:2379" + - "2380:2380" + + master1: + <<: *common-master + container_name: master1 + volumes: + - master_data:/data + ports: + - "9333:9333" + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:9333/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + + {% for i in range(1, s3_volume_count + 1) %} + volume{{ i }}: + <<: *common-volume + container_name: volume{{ i }} + volumes: + - volume{{ i }}_data:/data + ports: + - "{{ 8080 + i - 1 }}:8080" + depends_on: + - master1 + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/status"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s + {% endfor %} + + {% for i in range(1, s3_filer_count + 1) %} + filer{{ i }}: + <<: *common-filer + container_name: filer{{ i }} + volumes: + - filer_data:/data + ports: + - "{{ 8888 + i - 1 }}:8888" + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8888/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s + {% endfor %} + + s3: + <<: *common-vars + image: chrislusf/seaweedfs:latest + container_name: s3 + depends_on: + - master1 + - filer1 + command: ["-v=1", "s3", "-filer={{ s3_host_address }}:8888", "-port=8333", "-config=/etc/seaweedfs/s3.toml"] + volumes: + - {{ s3_config_dir }}/s3.toml:/etc/seaweedfs/s3.toml:ro + ports: + - "8333:8333" + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8333/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s + deploy: + resources: + limits: + memory: {{ s3_filer_memory_limit }} + cpus: {{ s3_filer_cpu_limit }} + + api: + <<: *common-vars + image: python:3.9-slim + container_name: api + depends_on: + - master1 + - filer1 + - s3 + volumes: + - {{ s3_config_dir }}:/config:ro + - /var/run/docker.sock:/var/run/docker.sock + ports: + - "8080:8080" + environment: + - SEAWEED_MASTER_URL=http://{{ s3_host_address }}:9333 + - 
SEAWEED_VOLUME_URL=http://{{ s3_host_address }}:8080 + - SEAWEED_FILER_URL=http://{{ s3_host_address }}:8888 + - SEAWEED_S3_URL=http://{{ s3_host_address }}:8333 + working_dir: /app + command: > + bash -c "pip install fastapi uvicorn docker boto3 requests && + echo '#!/usr/bin/env python3' > /app/api.py && + curl -s https://raw.githubusercontent.com/HarryKodden/SeaWeedFS-HA-Demo/main/api/api.py >> /app/api.py && + python /app/api.py" + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/api/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + deploy: + resources: + limits: + memory: {{ s3_api_memory_limit }} + cpus: {{ s3_api_cpu_limit }} + + proxy: + <<: *common-vars + image: nginx:alpine + container_name: proxy + depends_on: + - master1 + - filer1 + - api + volumes: + - {{ s3_config_dir }}/nginx.conf:/etc/nginx/conf.d/default.conf:ro + - {{ s3_config_dir }}/.htpasswd:/etc/nginx/.htpasswd:ro + ports: + - "80:80" + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s + deploy: + resources: + limits: + memory: {{ s3_proxy_memory_limit }} + cpus: {{ s3_proxy_cpu_limit }} diff --git a/roles/s3/templates/generate_htpasswd.sh.j2 b/roles/s3/templates/generate_htpasswd.sh.j2 new file mode 100644 index 000000000..7cfff5920 --- /dev/null +++ b/roles/s3/templates/generate_htpasswd.sh.j2 @@ -0,0 +1,20 @@ +#!/bin/bash +# Generate a .htpasswd file for HTTP Basic Authentication +# Usage: {{ s3_admin_user | default('admin') }} {{ s3_admin_password | default('password') }} + +USERNAME="$1" +PASSWORD="$2" +OUTPUT_FILE="{{ s3_config_dir }}/.htpasswd" + +# Check if htpasswd utility is available +if command -v htpasswd >/dev/null 2>&1; then + htpasswd -bc "$OUTPUT_FILE" "$USERNAME" "$PASSWORD" +else + # Fallback to using python if htpasswd is not available + python3 -c "import crypt,getpass,os; print('$USERNAME:'+crypt.crypt('$PASSWORD', crypt.mksalt(crypt.METHOD_SHA512)))" > "$OUTPUT_FILE" +fi + +# Set proper permissions +chmod 600 "$OUTPUT_FILE" + +echo ".htpasswd file created successfully at $OUTPUT_FILE" diff --git a/roles/s3/templates/nginx.conf.j2 b/roles/s3/templates/nginx.conf.j2 new file mode 100644 index 000000000..aec4110f1 --- /dev/null +++ b/roles/s3/templates/nginx.conf.j2 @@ -0,0 +1,62 @@ +server { + listen 80; + server_name {{ s3_domain_name }}; + + # Logging + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + # Set the max body size for uploads (adjust as needed) + client_max_body_size {{ s3_max_body_size | default('500M') }}; + + # Proxy settings + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_connect_timeout 300; + proxy_send_timeout 300; + proxy_read_timeout 300; + send_timeout 300; + + # Main route to S3 API endpoint + location / { + proxy_pass http://s3:8333; + proxy_buffering off; + } + + # Admin UI for Master + location /master/ { + auth_basic "Restricted Access"; + auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://master1:9333/; + } + + # Admin UI for Filer + location /filer/ { + auth_basic "Restricted Access"; + auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://filer1:8888/; + } + + {% for i in range(1, s3_volume_count + 1) %} + # Admin UI for Volume {{ i }} + location /volume{{ i }}/ { + auth_basic "Restricted Access"; + 
auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://volume{{ i }}:8080/; + } + {% endfor %} + + # Management API + location /api/ { + auth_basic "Restricted Access"; + auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://api:8080/; + } + + # Basic server status + location /status { + return 200 "Server is up and running"; + } +} diff --git a/roles/s3/templates/s3.toml.j2 b/roles/s3/templates/s3.toml.j2 new file mode 100644 index 000000000..277eed7f3 --- /dev/null +++ b/roles/s3/templates/s3.toml.j2 @@ -0,0 +1,45 @@ +# This is the SeaweedFS S3 API configuration file + +[jwt.signing] +key = "{{ s3_jwt_key | default('secret') }}" + +[s3.auditlog] +dir = "/data/s3_audit" + +[s3api] +# AWS-compatible endpoint URL is required for AWS SDK to work with virtual-host style +# The hostname or address must be the one to access from clients +enabled = true +domain_name = "{{ s3_domain_name }}" + +[s3api.access] +# Allow requests to create browser-based uploads. +allow_browser_uploads = {{ s3_allow_browser_uploads | default('true') }} + +# S3 API credentials +# Ensure these credentials match the ones in the .env file +[s3api.auth.static.admin] +access_key = "{{ s3_admin_key }}" +secret_key = "{{ s3_admin_secret }}" + +# add more credential lines here as needed +{% if s3_additional_users is defined and s3_additional_users|length > 0 %} +{% for user in s3_additional_users %} +[s3api.auth.static.{{ user.name }}] +access_key = "{{ user.access_key }}" +secret_key = "{{ user.secret_key }}" +{% endfor %} +{% endif %} + +# S3 signature validation +[s3.signature] +signature_version = {{ s3_signature_version | default(2) }} +address_style = "{{ s3_address_style | default('virtual_host') }}" + +# S3 API region settings +[s3.region] +name = "{{ s3_region_name | default('us-east-1') }}" + +# Configure the S3 API to use public URLs +[s3.public] +secure = {{ s3_secure | default('false') }} diff --git a/roles/s3/templates/seaweedfs-s3.service.j2 b/roles/s3/templates/seaweedfs-s3.service.j2 new file mode 100644 index 000000000..30fa41ecf --- /dev/null +++ b/roles/s3/templates/seaweedfs-s3.service.j2 @@ -0,0 +1,27 @@ +[Unit] +Description=SeaweedFS S3 Cluster +After=network.target +Requires=docker.service +After=docker.service + +[Service] +Type=oneshot +RemainAfterExit=yes +WorkingDirectory={{ s3_data_dir }} +Environment=PATH=/usr/local/bin:/usr/bin:/bin + +# Pull the latest images +ExecStartPre=-/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml pull + +# Start the containers +ExecStart=/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml up -d + +# Stop the containers +ExecStop=/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml down + +# Restart policy +Restart=on-failure +RestartSec=10s + +[Install] +WantedBy=multi-user.target diff --git a/roles/s3/templates/seaweedfs_s3.toml.j2 b/roles/s3/templates/seaweedfs_s3.toml.j2 new file mode 100644 index 000000000..3ec76c1a9 --- /dev/null +++ b/roles/s3/templates/seaweedfs_s3.toml.j2 @@ -0,0 +1,57 @@ +# SeaweedFS S3 Configuration File + +[aws] +# The region name used when returning the region in the S3 API responses +# Mandatory +region = "{{ s3_region_name | default('us-east-1') }}" + +[s3.auths] +# The admin credentials used to access s3 +# Default: none +# Multiple credentials allowed +id_key = "{{ s3_admin_key }}" +id_secret = "{{ s3_admin_secret }}" + +{% if s3_additional_users | default([]) %} +{% for user in s3_additional_users %} +id_key = "{{ user.key }}" +id_secret = "{{ user.secret }}" +{% endfor %} +{% 
endif %} + +[s3.admin] +# The admin credentials used to manage buckets +disableBucketPolicies = {{ s3_disable_bucket_policies | default('false') | lower }} +# Default: none +id_key = "{{ s3_admin_key }}" +id_secret = "{{ s3_admin_secret }}" + +[s3.options] +# The addressing style when accessing the S3 API +# Valid values: path-style, virtual-hosted-style +# Default: path-style +address_style = "{{ s3_address_style | default('path-style') }}" + +# The signature version, valid values: "v2", "v4" +# Default: v4 +signature_version = "{{ s3_signature_version | default('v4') }}" + +# Whether to support HeadDirObject S3 API, the SeaweedFS specific API as an optimization +# Default: false +enable_head_dir_object = {{ s3_enable_head_dir_object | default('false') | lower }} + +# Maximum multipart uploads kept in memory +# Default: 10000 +multipartUploadLimitsMiB = {{ s3_multipart_upload_limits_mib | default(10000) }} + +# Whether to allow listing all buckets for any authenticated user +# Default: false +allow_anonymous_list_buckets = {{ s3_allow_anonymous_list_buckets | default('false') | lower }} + +# Whether to support cross-origin resource sharing (CORS) of objects +# Default: true +enable_CORS = {{ s3_enable_cors | default('true') | lower }} + +# Whether to include etag in the response +# Default: true +include_ETag = {{ s3_include_etag | default('true') | lower }} diff --git a/roles/s3/vars/Debian.yml b/roles/s3/vars/Debian.yml new file mode 100644 index 000000000..e054c567b --- /dev/null +++ b/roles/s3/vars/Debian.yml @@ -0,0 +1,19 @@ +--- +# Debian family specific variables for s3 role + +s3_packages: +- docker.io +- docker-compose +- python3-docker +- python3-docker-compose +- apache2-utils # For htpasswd command + +s3_services: +- docker + +# System dependencies +seaweedfs_dependencies: +- wget +- curl +- jq +- python3 diff --git a/roles/s3/vars/RedHat.yml b/roles/s3/vars/RedHat.yml new file mode 100644 index 000000000..e377ebf1b --- /dev/null +++ b/roles/s3/vars/RedHat.yml @@ -0,0 +1,19 @@ +--- +# RedHat family specific variables for s3 role + +s3_packages: +- docker +- docker-compose +- python3-docker +- python3-docker-compose +- httpd-tools # For htpasswd command + +s3_services: +- docker + +# System dependencies +seaweedfs_dependencies: +- wget +- curl +- jq +- python3 From 67e9add8394656cfaca0dbf644471a15db43fa54 Mon Sep 17 00:00:00 2001 From: Harry Kodden Date: Fri, 12 Sep 2025 15:14:55 +0200 Subject: [PATCH 2/2] update s3 --- roles/s3/README.md | 61 ++--- roles/s3/defaults/main.yml | 46 +--- roles/s3/handlers/main-new.yml | 56 ++++ roles/s3/handlers/main.yml | 61 ++++- roles/s3/tasks/main.yml | 291 +++++++++++++-------- roles/s3/templates/.env.j2 | 19 -- roles/s3/templates/docker-compose.yml.j2 | 198 -------------- roles/s3/templates/generate_htpasswd.sh.j2 | 20 -- roles/s3/templates/nginx.conf.j2 | 62 ----- roles/s3/templates/s3.json.j2 | 30 +++ roles/s3/templates/s3.toml.j2 | 45 ---- roles/s3/templates/seaweedfs-s3.service.j2 | 27 -- roles/s3/templates/seaweedfs_s3.toml.j2 | 57 ---- roles/s3/vars/Debian.yml | 3 - roles/s3/vars/RedHat.yml | 3 - 15 files changed, 358 insertions(+), 621 deletions(-) create mode 100644 roles/s3/handlers/main-new.yml delete mode 100644 roles/s3/templates/.env.j2 delete mode 100644 roles/s3/templates/docker-compose.yml.j2 delete mode 100644 roles/s3/templates/generate_htpasswd.sh.j2 delete mode 100644 roles/s3/templates/nginx.conf.j2 create mode 100644 roles/s3/templates/s3.json.j2 delete mode 100644 roles/s3/templates/s3.toml.j2 delete mode 100644 
roles/s3/templates/seaweedfs-s3.service.j2 delete mode 100644 roles/s3/templates/seaweedfs_s3.toml.j2 diff --git a/roles/s3/README.md b/roles/s3/README.md index 54e8e23fd..7bd7b2a23 100644 --- a/roles/s3/README.md +++ b/roles/s3/README.md @@ -1,22 +1,22 @@ # S3 Role -This role sets up an S3-compatible object storage cluster using SeaweedFS. +This role sets up an S3-compatible object storage cluster using SeaweedFS, integrated with the OpenConext environment. ## Overview The role implements a distributed SeaweedFS cluster with the following components: -- Master server for coordination -- Multiple volume servers for storage -- Filer servers for file metadata and directory structure -- S3 API gateway -- Nginx proxy for routing and authentication -- Management API for administration +- Multiple master servers for high availability and coordination +- Multiple volume servers for distributed storage +- Filer server for file metadata and directory structure +- S3 API gateway for S3-compatible access + +All services are deployed as Docker containers and integrated with the existing loadbalancer network. ## Requirements -- Docker and Docker Compose must be installed on the target machine -- Python with docker and docker-compose modules for Ansible +- Docker must be installed on the target machine (handled by the docker role dependency) +- Python with docker module for Ansible ## Configuration @@ -24,30 +24,25 @@ The role implements a distributed SeaweedFS cluster with the following component | Variable | Description | Default | |----------|-------------|---------| -| `s3_base_dir` | Base directory for S3 installation | `/opt/s3` | -| `s3_config_dir` | Directory for configuration files | `/opt/s3/config` | -| `s3_data_dir` | Directory for data storage | `/opt/s3/data` | -| `s3_log_dir` | Directory for log files | `/opt/s3/logs` | -| `s3_host_address` | Host address for the S3 service | `localhost` | -| `s3_domain_name` | Domain name for the S3 service | `s3.example.com` | -| `s3_admin_key` | AWS access key for the admin user | `AKIAIOSFODNN7EXAMPLE` | -| `s3_admin_secret` | AWS secret key for the admin user | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` | -| `s3_auth_user` | Username for HTTP Basic Auth | `admin` | -| `s3_auth_password` | Password for HTTP Basic Auth | `password` | -| `s3_region_name` | AWS region name | `us-east-1` | -| `s3_docker_network_name` | Docker network name | `seaweedfs_network` | -| `s3_volume_count` | Number of volume servers | `3` | -| `s3_filer_count` | Number of filer servers | `1` | - -### Resource Limits - -| Variable | Description | Default | -|----------|-------------|---------| -| `s3_master_memory_limit` | Memory limit for master server | `1G` | -| `s3_master_cpu_limit` | CPU limit for master server | `1.0` | -| `s3_volume_memory_limit` | Memory limit for volume servers | `1G` | -| `s3_volume_cpu_limit` | CPU limit for volume servers | `1.0` | -| `s3_filer_memory_limit` | Memory limit for filer servers | `1G` | +| `s3_base_dir` | Base directory for S3 installation | `/opt/openconext/seaweedfs` | +| `s3_config_dir` | Directory for configuration files | `{{ s3_base_dir }}/config` | +| `s3_data_dir` | Directory for data storage | `{{ s3_base_dir }}/data` | +| `s3_access_key` | S3 access key for the admin user | `admin` | +| `s3_secret_key` | S3 secret key for the admin user | Generated random string | +| `s3_readonly_access_key` | S3 access key for read-only user | `readonly` | +| `s3_readonly_secret_key` | S3 secret key for read-only user | Generated random string 
| +| `s3_cors_origin` | CORS origin allowed for S3 API | `*` | +| `s3_filer_domain` | Domain name for the filer service | `filer.{{ base_domain }}` | +| `s3_api_domain` | Domain name for the S3 API service | `s3.{{ base_domain }}` | + +## Integration with OpenConext + +The S3 role integrates with the existing OpenConext deployment by: + +1. Using the same loadbalancer network for container networking +2. Using Traefik for routing requests to the appropriate containers +3. Following the same pattern for container management as other OpenConext roles +4. Sharing the base domain configuration for consistent URL patterns | `s3_filer_cpu_limit` | CPU limit for filer servers | `1.0` | | `s3_api_memory_limit` | Memory limit for API server | `512M` | | `s3_api_cpu_limit` | CPU limit for API server | `0.5` | diff --git a/roles/s3/defaults/main.yml b/roles/s3/defaults/main.yml index 1b3fb9758..b7001944c 100644 --- a/roles/s3/defaults/main.yml +++ b/roles/s3/defaults/main.yml @@ -2,43 +2,19 @@ # Default variables for the S3 role # Base configuration -s3_base_dir: /opt/seaweedfs +s3_base_dir: /opt/openconext/seaweedfs s3_config_dir: "{{ s3_base_dir }}/config" s3_data_dir: "{{ s3_base_dir }}/data" -s3_log_dir: "/var/log/seaweedfs" -# Authentication -s3_auth_user: admin -s3_auth_password: "{{ lookup('password', '/tmp/s3_auth_password chars=ascii_letters,digits length=16') }}" +# S3 credentials - these can be overridden by values in the secrets file +s3_access_key: "{{ s3_access_key | default('admin') }}" +s3_secret_key: "{{ s3_secret_key | default(lookup('password', '/tmp/s3_secret_key chars=ascii_letters,digits length=40')) }}" +s3_readonly_access_key: "{{ s3_readonly_access_key | default('readonly') }}" +s3_readonly_secret_key: "{{ s3_readonly_secret_key | default(lookup('password', '/tmp/s3_readonly_secret_key chars=ascii_letters,digits length=40')) }}" -# S3 API credentials -s3_admin_key: "{{ lookup('password', '/tmp/s3_access_key_id chars=ascii_letters,digits length=20') }}" -s3_admin_secret: "{{ lookup('password', '/tmp/s3_secret_access_key chars=ascii_letters,digits length=40') }}" -s3_region_name: "us-east-1" -s3_signature_version: "s3v4" -s3_addressing_style: "path" +# CORS configuration +s3_cors_origin: "*" -# Network configuration -s3_host_address: "{{ ansible_default_ipv4.address }}" -s3_domain_name: "{{ ansible_domain }}" - -# Cluster configuration -s3_master_count: 3 -s3_volume_count: 3 -s3_filer_count: 2 - -# Resource limits -s3_master_memory_limit: "1G" -s3_volume_memory_limit: "2G" -s3_filer_memory_limit: "2G" -s3_proxy_memory_limit: "512M" -s3_api_memory_limit: "512M" - -s3_master_cpu_limit: "0.5" -s3_volume_cpu_limit: "1.0" -s3_filer_cpu_limit: "1.0" -s3_proxy_cpu_limit: "0.5" -s3_api_cpu_limit: "0.5" - -# Docker configuration -s3_docker_network_name: "seaweedfs_network" +# Domain configuration for S3 endpoints +s3_filer_domain: "filer.{{ base_domain }}" +s3_api_domain: "s3.{{ base_domain }}" diff --git a/roles/s3/handlers/main-new.yml b/roles/s3/handlers/main-new.yml new file mode 100644 index 000000000..a9ab2272f --- /dev/null +++ b/roles/s3/handlers/main-new.yml @@ -0,0 +1,56 @@ +--- +# Handlers for the S3 role + +- name: restart master1 + community.docker.docker_container: + name: master1 + restart: true + +- name: restart master2 + community.docker.docker_container: + name: master2 + restart: true + +- name: restart master3 + community.docker.docker_container: + name: master3 + restart: true + +- name: restart volume1 + community.docker.docker_container: + name: volume1 + 
restart: true + +- name: restart volume2 + community.docker.docker_container: + name: volume2 + restart: true + +- name: restart volume3 + community.docker.docker_container: + name: volume3 + restart: true + +- name: restart filer + community.docker.docker_container: + name: filer + restart: true + +- name: restart s3 + community.docker.docker_container: + name: s3 + restart: true + +- name: restart all s3 services + community.docker.docker_container: + name: "{{ item }}" + restart: true + with_items: + - master1 + - master2 + - master3 + - volume1 + - volume2 + - volume3 + - filer + - s3 diff --git a/roles/s3/handlers/main.yml b/roles/s3/handlers/main.yml index 55e77d73d..ec265bf10 100644 --- a/roles/s3/handlers/main.yml +++ b/roles/s3/handlers/main.yml @@ -1,11 +1,56 @@ --- # Handlers for the S3 role -- name: reload systemd - systemd: - daemon_reload: yes - -- name: restart s3-cluster - systemd: - name: s3-cluster - state: restarted +- name: restart master1 + community.docker.docker_container: + name: master1 + restart: true + +- name: restart master2 + community.docker.docker_container: + name: master2 + restart: true + +- name: restart master3 + community.docker.docker_container: + name: master3 + restart: true + +- name: restart volume1 + community.docker.docker_container: + name: volume1 + restart: true + +- name: restart volume2 + community.docker.docker_container: + name: volume2 + restart: true + +- name: restart volume3 + community.docker.docker_container: + name: volume3 + restart: true + +- name: restart filer + community.docker.docker_container: + name: filer + restart: true + +- name: restart s3 + community.docker.docker_container: + name: s3 + restart: true + +- name: restart all s3 services + community.docker.docker_container: + name: "{{ item }}" + restart: true + with_items: + - master1 + - master2 + - master3 + - volume1 + - volume2 + - volume3 + - filer + - s3 diff --git a/roles/s3/tasks/main.yml b/roles/s3/tasks/main.yml index 5da3d61cc..f6035db73 100644 --- a/roles/s3/tasks/main.yml +++ b/roles/s3/tasks/main.yml @@ -9,160 +9,229 @@ - s3-install - name: Install required packages - package: + ansible.builtin.package: name: "{{ s3_packages }}" state: present tags: - s3 - s3-install -- name: Ensure required services are running - service: - name: "{{ item }}" - state: started - enabled: yes - with_items: "{{ s3_services }}" - tags: - - s3 - - s3-install - - name: Create required directories - file: + ansible.builtin.file: path: "{{ item }}" state: directory owner: root group: root - mode: 755 + mode: "0755" with_items: - - "{{ s3_base_dir }}" - - "{{ s3_config_dir }}" - - "{{ s3_data_dir }}" - - "{{ s3_log_dir }}" + - "/opt/openconext/seaweedfs" + - "/opt/openconext/seaweedfs/config" + - "/opt/openconext/seaweedfs/data/master1" + - "/opt/openconext/seaweedfs/data/master2" + - "/opt/openconext/seaweedfs/data/master3" + - "/opt/openconext/seaweedfs/data/volume1" + - "/opt/openconext/seaweedfs/data/volume2" + - "/opt/openconext/seaweedfs/data/volume3" + - "/opt/openconext/seaweedfs/data/filer" tags: - s3 - s3-install -- name: Set up environment configuration file - template: - src: .env.j2 - dest: "{{ s3_config_dir }}/.env" +- name: Create S3 JSON configuration + ansible.builtin.template: + src: s3.json.j2 + dest: "/opt/openconext/seaweedfs/config/s3.json" owner: root group: root - mode: 644 + mode: "0644" tags: - s3 - - s3-install - s3-config -- name: Set up S3 configuration file - template: - src: seaweedfs_s3.toml.j2 - dest: "{{ s3_config_dir }}/s3.toml" - owner: root - 
group: root - mode: 644 - tags: - - s3 - - s3-install - - s3-config - -- name: Set up Docker Compose file - template: - src: docker-compose.yml.j2 - dest: "{{ s3_base_dir }}/docker-compose.yml" - owner: root - group: root - mode: 644 - tags: - - s3 - - s3-install - - s3-config - -- name: Set up Nginx configuration - template: - src: nginx.conf.j2 - dest: "{{ s3_config_dir }}/nginx.conf" - owner: root - group: root - mode: 644 +# Deploy master nodes +- name: Create and start master1 container + community.docker.docker_container: + name: master1 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "master -port=9333 -ip=master1 -mdir=/data -peers=master1:9333,master2:9333,master3:9333" + volumes: + - "/opt/openconext/seaweedfs/data/master1:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://master1:9333/" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s tags: - s3 - - s3-install - - s3-config + - s3-deploy -- name: Set up htpasswd script - template: - src: generate_htpasswd.sh.j2 - dest: "{{ s3_config_dir }}/generate_htpasswd.sh" - owner: root - group: root - mode: 755 +- name: Create and start master2 container + community.docker.docker_container: + name: master2 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "master -port=9333 -ip=master2 -mdir=/data -peers=master1:9333,master2:9333,master3:9333" + volumes: + - "/opt/openconext/seaweedfs/data/master2:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://master2:9333/" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s tags: - s3 - - s3-install - - s3-config + - s3-deploy -- name: Create .htpasswd for basic authentication - command: "{{ s3_config_dir }}/generate_htpasswd.sh {{ s3_auth_user }} {{ s3_auth_password }}" - args: - creates: "{{ s3_config_dir }}/.htpasswd" +- name: Create and start master3 container + community.docker.docker_container: + name: master3 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "master -port=9333 -ip=master3 -mdir=/data -peers=master1:9333,master2:9333,master3:9333" + volumes: + - "/opt/openconext/seaweedfs/data/master3:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://master3:9333/" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s tags: - s3 - - s3-install - - s3-config + - s3-deploy -- name: Pull required Docker images - docker_image: - name: "{{ item }}" - source: pull - with_items: - - nginx:alpine - - python:3.9-slim - - chrislusf/seaweedfs:latest +# Deploy volume nodes +- name: Create and start volume1 container + community.docker.docker_container: + name: volume1 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "volume -port=8080 -ip=volume1 -mserver=master1:9333,master2:9333,master3:9333 -dir=/data -max=5 -rack=rack1" + volumes: + - "/opt/openconext/seaweedfs/data/volume1:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://volume1:8080/status" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s tags: - s3 - - s3-install + - s3-deploy -- name: Start SeaweedFS S3 cluster - docker_compose: - project_src: "{{ s3_base_dir }}" - 
state: present +- name: Create and start volume2 container + community.docker.docker_container: + name: volume2 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "volume -port=8080 -ip=volume2 -mserver=master1:9333,master2:9333,master3:9333 -dir=/data -max=5 -rack=rack1" + volumes: + - "/opt/openconext/seaweedfs/data/volume2:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://volume2:8080/status" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s tags: - s3 - - s3-service + - s3-deploy -- name: Create systemd service for S3 - template: - src: seaweedfs-s3.service.j2 - dest: /etc/systemd/system/s3-cluster.service - owner: root - group: root - mode: 644 - notify: reload systemd +- name: Create and start volume3 container + community.docker.docker_container: + name: volume3 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "volume -port=8080 -ip=volume3 -mserver=master1:9333,master2:9333,master3:9333 -dir=/data -max=5 -rack=rack1" + volumes: + - "/opt/openconext/seaweedfs/data/volume3:/data" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://volume3:8080/status" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s tags: - s3 - - s3-service + - s3-deploy -- name: Enable S3 service - systemd: - name: s3-cluster - enabled: yes +# Deploy filer +- name: Create and start filer container + community.docker.docker_container: + name: filer + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" state: started + networks: + - name: "loadbalancer" + command: "filer -master=master1:9333,master2:9333,master3:9333 -ip=0.0.0.0 -port=8888 -dataCenter=dc1 -maxMB=1024" + volumes: + - "/opt/openconext/seaweedfs/data/filer:/data" + labels: + traefik.http.routers.filer.rule: "Host(`filer.{{ base_domain }}`)" + traefik.http.routers.filer.tls: "true" + traefik.enable: "true" + healthcheck: + test: [ "CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8888/" ] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s tags: - s3 - - s3-service + - s3-deploy -- name: Wait for S3 cluster to be ready - uri: - url: "http://{{ s3_host_address }}:8080/api/health" - method: GET - status_code: 200 - user: "{{ s3_auth_user }}" - password: "{{ s3_auth_password }}" - register: result - until: result.status == 200 - retries: 10 - delay: 5 +# Deploy S3 API endpoint +- name: Create and start S3 container + community.docker.docker_container: + name: s3 + image: chrislusf/seaweedfs:latest + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + command: "s3 -filer=filer:8888 -ip.bind=0.0.0.0 -port=8333 -config=/etc/seaweedfs/s3.json" + volumes: + - "/opt/openconext/seaweedfs/config/s3.json:/etc/seaweedfs/s3.json:ro" + labels: + traefik.http.routers.s3.rule: "Host(`s3.{{ base_domain }}`)" + traefik.http.routers.s3.tls: "true" + traefik.enable: "true" + healthcheck: + test: [ "CMD", "sh", "-c", "netstat -tlnp | grep :8333" ] + interval: 10s + timeout: 5s + retries: 3 + start_period: 10s tags: - s3 - - s3-service + - s3-deploy diff --git a/roles/s3/templates/.env.j2 b/roles/s3/templates/.env.j2 deleted file mode 100644 index 61ba57180..000000000 --- a/roles/s3/templates/.env.j2 +++ /dev/null @@ -1,19 +0,0 @@ -# AWS S3 Configuration -AWS_ACCESS_KEY_ID={{ s3_admin_key }} 
-AWS_SECRET_ACCESS_KEY={{ s3_admin_secret }} -AWS_DEFAULT_REGION={{ s3_region_name | default('us-east-1') }} -AWS_ENDPOINT_URL=http://{{ s3_host_address }}:8333 - -# S3 API Configuration -S3_ENDPOINT=http://{{ s3_host_address }}:8333 -S3_REGION={{ s3_region_name | default('us-east-1') }} - -# SeaweedFS Service URLs -SEAWEED_MASTER_URL=http://{{ s3_host_address }}:9333 -SEAWEED_VOLUME_URL=http://{{ s3_host_address }}:8080 -SEAWEED_FILER_URL=http://{{ s3_host_address }}:8888 -SEAWEED_S3_URL=http://{{ s3_host_address }}:8333 - -# Admin credentials for the HTTP Basic Auth -ADMIN_USER={{ s3_admin_user | default('admin') }} -ADMIN_PASSWORD={{ s3_admin_password | default('password') }} diff --git a/roles/s3/templates/docker-compose.yml.j2 b/roles/s3/templates/docker-compose.yml.j2 deleted file mode 100644 index c9c25757c..000000000 --- a/roles/s3/templates/docker-compose.yml.j2 +++ /dev/null @@ -1,198 +0,0 @@ -version: '3.8' - -networks: - {{ s3_docker_network_name }}: - driver: bridge - -volumes: - master_data: - {% for i in range(1, s3_volume_count + 1) %} - volume{{ i }}_data: - {% endfor %} - filer_data: - etcd_data: - -# Common configurations using YAML anchors -x-common-vars: &common-vars - restart: always - networks: - - {{ s3_docker_network_name }} - -x-common-master: &common-master - <<: *common-vars - image: chrislusf/seaweedfs:latest - depends_on: - - etcd - command: ["-v=1", "master", "-mdir=/data", "-ip={{ s3_host_address }}", "-port=9333", "-peers={{ s3_host_address }}:9333", "-defaultReplication=001"] - deploy: - resources: - limits: - memory: {{ s3_master_memory_limit }} - cpus: {{ s3_master_cpu_limit }} - -x-common-volume: &common-volume - <<: *common-vars - image: chrislusf/seaweedfs:latest - command: ["-v=1", "volume", "-dir=/data", "-max=0", "-mserver={{ s3_host_address }}:9333", "-port=8080"] - deploy: - resources: - limits: - memory: {{ s3_volume_memory_limit }} - cpus: {{ s3_volume_cpu_limit }} - -x-common-filer: &common-filer - <<: *common-vars - image: chrislusf/seaweedfs:latest - depends_on: - - master1 - - volume1 - command: ["-v=1", "filer", "-master={{ s3_host_address }}:9333", "-port=8888", "-s3"] - deploy: - resources: - limits: - memory: {{ s3_filer_memory_limit }} - cpus: {{ s3_filer_cpu_limit }} - -services: - etcd: - <<: *common-vars - image: quay.io/coreos/etcd:v3.5.0 - command: ["/usr/local/bin/etcd", "--name", "etcd", "--data-dir", "/data", "--listen-client-urls", "http://0.0.0.0:2379", "--advertise-client-urls", "http://0.0.0.0:2379", "--listen-peer-urls", "http://0.0.0.0:2380", "--initial-advertise-peer-urls", "http://0.0.0.0:2380", "--initial-cluster", "etcd=http://0.0.0.0:2380", "--initial-cluster-token", "seaweedfs-token", "--initial-cluster-state", "new"] - volumes: - - etcd_data:/data - ports: - - "2379:2379" - - "2380:2380" - - master1: - <<: *common-master - container_name: master1 - volumes: - - master_data:/data - ports: - - "9333:9333" - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:9333/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 10s - - {% for i in range(1, s3_volume_count + 1) %} - volume{{ i }}: - <<: *common-volume - container_name: volume{{ i }} - volumes: - - volume{{ i }}_data:/data - ports: - - "{{ 8080 + i - 1 }}:8080" - depends_on: - - master1 - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/status"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 5s - {% endfor %} - - {% for i in range(1, s3_filer_count + 1) %} - 
filer{{ i }}: - <<: *common-filer - container_name: filer{{ i }} - volumes: - - filer_data:/data - ports: - - "{{ 8888 + i - 1 }}:8888" - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8888/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 5s - {% endfor %} - - s3: - <<: *common-vars - image: chrislusf/seaweedfs:latest - container_name: s3 - depends_on: - - master1 - - filer1 - command: ["-v=1", "s3", "-filer={{ s3_host_address }}:8888", "-port=8333", "-config=/etc/seaweedfs/s3.toml"] - volumes: - - {{ s3_config_dir }}/s3.toml:/etc/seaweedfs/s3.toml:ro - ports: - - "8333:8333" - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8333/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 5s - deploy: - resources: - limits: - memory: {{ s3_filer_memory_limit }} - cpus: {{ s3_filer_cpu_limit }} - - api: - <<: *common-vars - image: python:3.9-slim - container_name: api - depends_on: - - master1 - - filer1 - - s3 - volumes: - - {{ s3_config_dir }}:/config:ro - - /var/run/docker.sock:/var/run/docker.sock - ports: - - "8080:8080" - environment: - - SEAWEED_MASTER_URL=http://{{ s3_host_address }}:9333 - - SEAWEED_VOLUME_URL=http://{{ s3_host_address }}:8080 - - SEAWEED_FILER_URL=http://{{ s3_host_address }}:8888 - - SEAWEED_S3_URL=http://{{ s3_host_address }}:8333 - working_dir: /app - command: > - bash -c "pip install fastapi uvicorn docker boto3 requests && - echo '#!/usr/bin/env python3' > /app/api.py && - curl -s https://raw.githubusercontent.com/HarryKodden/SeaWeedFS-HA-Demo/main/api/api.py >> /app/api.py && - python /app/api.py" - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/api/health"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 10s - deploy: - resources: - limits: - memory: {{ s3_api_memory_limit }} - cpus: {{ s3_api_cpu_limit }} - - proxy: - <<: *common-vars - image: nginx:alpine - container_name: proxy - depends_on: - - master1 - - filer1 - - api - volumes: - - {{ s3_config_dir }}/nginx.conf:/etc/nginx/conf.d/default.conf:ro - - {{ s3_config_dir }}/.htpasswd:/etc/nginx/.htpasswd:ro - ports: - - "80:80" - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 5s - deploy: - resources: - limits: - memory: {{ s3_proxy_memory_limit }} - cpus: {{ s3_proxy_cpu_limit }} diff --git a/roles/s3/templates/generate_htpasswd.sh.j2 b/roles/s3/templates/generate_htpasswd.sh.j2 deleted file mode 100644 index 7cfff5920..000000000 --- a/roles/s3/templates/generate_htpasswd.sh.j2 +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Generate a .htpasswd file for HTTP Basic Authentication -# Usage: {{ s3_admin_user | default('admin') }} {{ s3_admin_password | default('password') }} - -USERNAME="$1" -PASSWORD="$2" -OUTPUT_FILE="{{ s3_config_dir }}/.htpasswd" - -# Check if htpasswd utility is available -if command -v htpasswd >/dev/null 2>&1; then - htpasswd -bc "$OUTPUT_FILE" "$USERNAME" "$PASSWORD" -else - # Fallback to using python if htpasswd is not available - python3 -c "import crypt,getpass,os; print('$USERNAME:'+crypt.crypt('$PASSWORD', crypt.mksalt(crypt.METHOD_SHA512)))" > "$OUTPUT_FILE" -fi - -# Set proper permissions -chmod 600 "$OUTPUT_FILE" - -echo ".htpasswd file created successfully at $OUTPUT_FILE" diff --git a/roles/s3/templates/nginx.conf.j2 b/roles/s3/templates/nginx.conf.j2 deleted file mode 100644 index aec4110f1..000000000 
--- a/roles/s3/templates/nginx.conf.j2 +++ /dev/null @@ -1,62 +0,0 @@ -server { - listen 80; - server_name {{ s3_domain_name }}; - - # Logging - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; - - # Set the max body size for uploads (adjust as needed) - client_max_body_size {{ s3_max_body_size | default('500M') }}; - - # Proxy settings - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_connect_timeout 300; - proxy_send_timeout 300; - proxy_read_timeout 300; - send_timeout 300; - - # Main route to S3 API endpoint - location / { - proxy_pass http://s3:8333; - proxy_buffering off; - } - - # Admin UI for Master - location /master/ { - auth_basic "Restricted Access"; - auth_basic_user_file /etc/nginx/.htpasswd; - proxy_pass http://master1:9333/; - } - - # Admin UI for Filer - location /filer/ { - auth_basic "Restricted Access"; - auth_basic_user_file /etc/nginx/.htpasswd; - proxy_pass http://filer1:8888/; - } - - {% for i in range(1, s3_volume_count + 1) %} - # Admin UI for Volume {{ i }} - location /volume{{ i }}/ { - auth_basic "Restricted Access"; - auth_basic_user_file /etc/nginx/.htpasswd; - proxy_pass http://volume{{ i }}:8080/; - } - {% endfor %} - - # Management API - location /api/ { - auth_basic "Restricted Access"; - auth_basic_user_file /etc/nginx/.htpasswd; - proxy_pass http://api:8080/; - } - - # Basic server status - location /status { - return 200 "Server is up and running"; - } -} diff --git a/roles/s3/templates/s3.json.j2 b/roles/s3/templates/s3.json.j2 new file mode 100644 index 000000000..7c8d9ac82 --- /dev/null +++ b/roles/s3/templates/s3.json.j2 @@ -0,0 +1,30 @@ +{ + "identities": [ + { + "name": "admin", + "credentials": [ + { + "accessKey": "{{ s3_access_key | default('admin') }}", + "secretKey": "{{ s3_secret_key | default('password') }}" + } + ], + "actions": ["Admin", "Read", "Write"] + }, + { + "name": "readonly", + "credentials": [ + { + "accessKey": "{{ s3_readonly_access_key | default('readonly') }}", + "secretKey": "{{ s3_readonly_secret_key | default('password') }}" + } + ], + "actions": ["Read"] + } + ], + "domains": [ + { + "name": "*", + "allowedOrigins": ["{{ s3_cors_origin | default('*') }}"] + } + ] +} diff --git a/roles/s3/templates/s3.toml.j2 b/roles/s3/templates/s3.toml.j2 deleted file mode 100644 index 277eed7f3..000000000 --- a/roles/s3/templates/s3.toml.j2 +++ /dev/null @@ -1,45 +0,0 @@ -# This is the SeaweedFS S3 API configuration file - -[jwt.signing] -key = "{{ s3_jwt_key | default('secret') }}" - -[s3.auditlog] -dir = "/data/s3_audit" - -[s3api] -# AWS-compatible endpoint URL is required for AWS SDK to work with virtual-host style -# The hostname or address must be the one to access from clients -enabled = true -domain_name = "{{ s3_domain_name }}" - -[s3api.access] -# Allow requests to create browser-based uploads. 
-allow_browser_uploads = {{ s3_allow_browser_uploads | default('true') }} - -# S3 API credentials -# Ensure these credentials match the ones in the .env file -[s3api.auth.static.admin] -access_key = "{{ s3_admin_key }}" -secret_key = "{{ s3_admin_secret }}" - -# add more credential lines here as needed -{% if s3_additional_users is defined and s3_additional_users|length > 0 %} -{% for user in s3_additional_users %} -[s3api.auth.static.{{ user.name }}] -access_key = "{{ user.access_key }}" -secret_key = "{{ user.secret_key }}" -{% endfor %} -{% endif %} - -# S3 signature validation -[s3.signature] -signature_version = {{ s3_signature_version | default(2) }} -address_style = "{{ s3_address_style | default('virtual_host') }}" - -# S3 API region settings -[s3.region] -name = "{{ s3_region_name | default('us-east-1') }}" - -# Configure the S3 API to use public URLs -[s3.public] -secure = {{ s3_secure | default('false') }} diff --git a/roles/s3/templates/seaweedfs-s3.service.j2 b/roles/s3/templates/seaweedfs-s3.service.j2 deleted file mode 100644 index 30fa41ecf..000000000 --- a/roles/s3/templates/seaweedfs-s3.service.j2 +++ /dev/null @@ -1,27 +0,0 @@ -[Unit] -Description=SeaweedFS S3 Cluster -After=network.target -Requires=docker.service -After=docker.service - -[Service] -Type=oneshot -RemainAfterExit=yes -WorkingDirectory={{ s3_data_dir }} -Environment=PATH=/usr/local/bin:/usr/bin:/bin - -# Pull the latest images -ExecStartPre=-/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml pull - -# Start the containers -ExecStart=/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml up -d - -# Stop the containers -ExecStop=/usr/bin/docker compose -f {{ s3_data_dir }}/docker-compose.yml down - -# Restart policy -Restart=on-failure -RestartSec=10s - -[Install] -WantedBy=multi-user.target diff --git a/roles/s3/templates/seaweedfs_s3.toml.j2 b/roles/s3/templates/seaweedfs_s3.toml.j2 deleted file mode 100644 index 3ec76c1a9..000000000 --- a/roles/s3/templates/seaweedfs_s3.toml.j2 +++ /dev/null @@ -1,57 +0,0 @@ -# SeaweedFS S3 Configuration File - -[aws] -# The region name used when returning the region in the S3 API responses -# Mandatory -region = "{{ s3_region_name | default('us-east-1') }}" - -[s3.auths] -# The admin credentials used to access s3 -# Default: none -# Multiple credentials allowed -id_key = "{{ s3_admin_key }}" -id_secret = "{{ s3_admin_secret }}" - -{% if s3_additional_users | default([]) %} -{% for user in s3_additional_users %} -id_key = "{{ user.key }}" -id_secret = "{{ user.secret }}" -{% endfor %} -{% endif %} - -[s3.admin] -# The admin credentials used to manage buckets -disableBucketPolicies = {{ s3_disable_bucket_policies | default('false') | lower }} -# Default: none -id_key = "{{ s3_admin_key }}" -id_secret = "{{ s3_admin_secret }}" - -[s3.options] -# The addressing style when accessing the S3 API -# Valid values: path-style, virtual-hosted-style -# Default: path-style -address_style = "{{ s3_address_style | default('path-style') }}" - -# The signature version, valid values: "v2", "v4" -# Default: v4 -signature_version = "{{ s3_signature_version | default('v4') }}" - -# Whether to support HeadDirObject S3 API, the SeaweedFS specific API as an optimization -# Default: false -enable_head_dir_object = {{ s3_enable_head_dir_object | default('false') | lower }} - -# Maximum multipart uploads kept in memory -# Default: 10000 -multipartUploadLimitsMiB = {{ s3_multipart_upload_limits_mib | default(10000) }} - -# Whether to allow listing all buckets for any 
authenticated user -# Default: false -allow_anonymous_list_buckets = {{ s3_allow_anonymous_list_buckets | default('false') | lower }} - -# Whether to support cross-origin resource sharing (CORS) of objects -# Default: true -enable_CORS = {{ s3_enable_cors | default('true') | lower }} - -# Whether to include etag in the response -# Default: true -include_ETag = {{ s3_include_etag | default('true') | lower }} diff --git a/roles/s3/vars/Debian.yml b/roles/s3/vars/Debian.yml index e054c567b..8b2348718 100644 --- a/roles/s3/vars/Debian.yml +++ b/roles/s3/vars/Debian.yml @@ -3,10 +3,7 @@ s3_packages: - docker.io -- docker-compose - python3-docker -- python3-docker-compose -- apache2-utils # For htpasswd command s3_services: - docker diff --git a/roles/s3/vars/RedHat.yml b/roles/s3/vars/RedHat.yml index e377ebf1b..7e26a8c61 100644 --- a/roles/s3/vars/RedHat.yml +++ b/roles/s3/vars/RedHat.yml @@ -3,10 +3,7 @@ s3_packages: - docker -- docker-compose - python3-docker -- python3-docker-compose -- httpd-tools # For htpasswd command s3_services: - docker
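After the second commit, the cluster is reached through Traefik at `s3.{{ base_domain }}` and authorization is driven by the two identities rendered into `s3.json` (an `admin` identity with Admin/Read/Write actions and a `readonly` identity with Read only). The sketch below is an illustrative check of that split, not part of the role: it assumes boto3 is installed, that the Traefik TLS endpoint is reachable, and the hostname and secrets are placeholders standing in for `s3_api_domain`, `s3_secret_key`, and `s3_readonly_secret_key`.

```python
# Sketch: verify the admin identity can write and the readonly identity cannot,
# using the identities defined in s3.json.j2. Hostname and secrets are placeholders.
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError

def make_client(access_key: str, secret_key: str):
    return boto3.client(
        "s3",
        endpoint_url="https://s3.example.org",  # https://{{ s3_api_domain }} via Traefik
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        config=Config(s3={"addressing_style": "path"}),
    )

admin = make_client("admin", "ADMIN_SECRET")            # s3_access_key / s3_secret_key
readonly = make_client("readonly", "READONLY_SECRET")   # s3_readonly_* credentials

# Admin has the Write action, so these should succeed.
admin.create_bucket(Bucket="probe")
admin.put_object(Bucket="probe", Key="k", Body=b"v")

# Readonly has only the Read action: reads succeed, writes should be rejected.
print(readonly.get_object(Bucket="probe", Key="k")["Body"].read())
try:
    readonly.put_object(Bucket="probe", Key="k2", Body=b"v")
except ClientError as exc:
    print("write rejected as expected:", exc.response["Error"]["Code"])
```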