rename ass

Oliver
2025-08-10 17:43:04 -03:00
parent 73d9587e1d
commit 4c93fa132f
20 changed files with 447 additions and 29 deletions

4server (new binary file, not shown)


@@ -12,3 +12,4 @@ set_prod() {
    export HOSTS_FILE="/app/hosts.all"
    echo "HOSTS_FILE set to: $HOSTS_FILE"
}


@@ -28,7 +28,7 @@ RUN ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N "" && \
chmod 600 /root/.ssh/id_rsa && \
chmod 600 /root/.ssh/config
COPY ass /usr/bin/
COPY rex /usr/bin/
COPY template /usr/bin/
COPY dpush /usr/bin/
COPY create_volume /usr/bin/


@@ -5,21 +5,21 @@ Host dev
Host saopaulo
Hostname 192.168.9.11
Hostname saopaulo
User ansible
IdentityFile /mnt/encrypted_volume/.ssh/saopaulo
Host mumbai
Hostname 192.168.9.17
Hostname mumbai
User ansible
IdentityFile /mnt/encrypted_volume/.ssh/mumbai
Host london
Hostname 192.168.9.15
Hostname london
User ansible
IdentityFile /mnt/encrypted_volume/.ssh/london
Host boston
Hostname 192.168.9.16
Hostname boston
User ansible
IdentityFile /mnt/encrypted_volume/.ssh/boston


@@ -10,7 +10,8 @@ fi
FILE="/data/$1"
MAPPER_NAME="encrypted_volume"
MOUNT_POINT="/mnt/${MAPPER_NAME}"
dd if=/dev/zero of="$FILE" bs=1M count=100
dd if=/dev/zero of="$FILE" bs=1M count=10
echo "Setting up LUKS on $FILE..."
cryptsetup luksFormat "$FILE"

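For context, luksFormat only initializes the container file; below is a minimal sketch of the open, format and mount steps that typically follow, reusing the variable names from the hunk above (the repo's own mount flow is not shown in this diff). Note that recent cryptsetup defaults to LUKS2, whose header alone reserves roughly 16 MiB, so a 10 MB file may be too small to format.

# Sketch only: open the LUKS container, create a filesystem, and mount it
cryptsetup luksOpen "$FILE" "$MAPPER_NAME"        # prompts for the passphrase set during luksFormat
mkfs.ext4 "/dev/mapper/${MAPPER_NAME}"            # first-time filesystem creation
mkdir -p "$MOUNT_POINT"
mount "/dev/mapper/${MAPPER_NAME}" "$MOUNT_POINT"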
alpine/hosts (new file, +9)

@@ -0,0 +1,9 @@
127.0.0.1 dev
::1 dev
192.168.111.209 dev
192.168.9.11 saopaulo
192.168.9.17 mumbai
192.168.9.15 london
192.168.9.16 boston

app/etc/.bashrc (new file, +15)

@@ -0,0 +1,15 @@
# ~/.bashrc
echo "command: mount_volume <volume>"
echo "alias: set_prod"
export HOSTS_FILE="/app/hosts.dev"
export PS1="\[\e[32m\]\h:\w\$\[\e[0m\] "
df -h .
set_prod() {
    export HOSTS_FILE="/app/hosts.all"
    echo "HOSTS_FILE set to: $HOSTS_FILE"
}


@@ -1,9 +1,9 @@
#!/sbin/openrc-run
name="od8n-api"
description="OD8N API Service"
name="4server-api"
description="4server API Service"
command="/OD8N/sbin/api"
command="/4server/sbin/api"
command_args=""
pidfile="/run/${RC_SVCNAME}.pid"
command_background="yes"
@@ -13,8 +13,8 @@ if [ -f /etc/od8n ]; then
export $(cut -d= -f1 /etc/od8n)
fi
output_log="/OD8N/data/api.log"
error_log="/OD8N/data/api.log"
output_log="/4server/data/api.log"
error_log="/4server/data/api.log"
depend() {
need net

app/nebula/config.yml (new file, +87)

@@ -0,0 +1,87 @@
pki:
  ca: /etc/nebula/ca.crt
  cert: /etc/nebula/{{ hostname }}.crt
  key: /etc/nebula/{{ hostname }}.key

static_host_map:
  "192.168.9.1": ["167.71.79.60:4242"]

lighthouse:
  # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
  # you have configured to be lighthouses in your network
  am_lighthouse: false
  interval: 60
  hosts:

listen:
  host: 0.0.0.0
  port: 4242

punchy:
  punch: true

relay:
  am_relay: false
  use_relays: true

tun:
  disabled: false
  dev: nebula1
  drop_local_broadcast: false
  drop_multicast: false
  tx_queue: 500
  mtu: 1300
  routes:
    #- mtu: 8800
    #  route: 10.0.0.0/16
  unsafe_routes:

logging:
  level: info
  format: text

firewall:
  outbound_action: drop
  inbound_action: drop
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  outbound:
    - port: any
      proto: any
      host: any

  inbound:
    - port: any
      proto: icmp
      host: any

    - port: 22 #GIT
      proto: tcp
      groups:
        - admin
        - ansible

    - port: 8080
      proto: tcp
      groups:
        - admin

    - port: 3001
      proto: tcp
      groups:
        - admin

    - port: 8080
      proto: tcp
      groups:
        - admin

app/nebula/nebula (new executable, binary not shown)
app/nebula/nebula-cert (new executable, binary not shown)

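The config expects per-host credentials at /etc/nebula/{{ hostname }}.crt and .key, and the firewall gates ports 22, 8080 and 3001 on the admin and ansible groups. A hedged sketch of issuing matching certs with the bundled nebula-cert binary; the CA name and the host/IP below are illustrative, assuming 192.168.9.0/24 is the overlay range.

# Illustrative only: create a CA and sign a host cert whose group matches the firewall rules above
./nebula-cert ca -name "4projects"                                     # CA name is a placeholder
./nebula-cert sign -name "saopaulo" -ip "192.168.9.11/24" -groups "ansible"
# copy ca.crt plus saopaulo.crt / saopaulo.key to /etc/nebula/ on the node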
app/sbin/api (new executable file, +163)

@@ -0,0 +1,163 @@
#!/usr/bin/env python3
from fastapi import FastAPI, HTTPException, Depends, Response
from fastapi.security.api_key import APIKeyHeader
from fastapi.responses import RedirectResponse
from pydantic import BaseModel
import psutil
import sqlite3
import subprocess
import os
import sys
import uvicorn
from typing import Optional

# Constants
DB_PATH = "/4server/data/contracts/contracts.db"
BIN_PATH = "/4server/sbin"
API_KEY = os.getenv("API_KEY", "your-secret-api-key")

# FastAPI app
app = FastAPI()

# Security
api_key_header = APIKeyHeader(name="X-API-Key")

def verify_api_key(key: str = Depends(api_key_header)):
    if key != API_KEY:
        raise HTTPException(status_code=403, detail="Unauthorized")

# ---------------------- Database ----------------------
def init_db():
    """Initialize the database with containers table."""
    os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS containers (
            ID INTEGER PRIMARY KEY AUTOINCREMENT,
            UUID CHAR(36),
            location CHAR(100),
            email CHAR(100),
            expires DATE,
            tags TEXT,
            env TEXT
        )
    ''')
    conn.commit()
    conn.close()

# ---------------------- Models ----------------------
class ContainerModel(BaseModel):
    UUID: str
    location: str
    email: str
    expires: str
    tags: Optional[str] = None
    env: Optional[str] = None

class StartContainerRequest(BaseModel):
    uuid: str
    email: str

# ---------------------- Routes ----------------------
@app.get("/", include_in_schema=False)
def redirect_to_odoo():
    return RedirectResponse(url="https://ODOO4PROJECTS.com")

@app.post("/startContainer", dependencies=[Depends(verify_api_key)])
def start_container(request: StartContainerRequest):
    try:
        result = subprocess.run(
            [os.path.join(BIN_PATH, "startContainer"), request.uuid, request.email],
            capture_output=True,
            text=True,
            check=True
        )
        return {"status": "success", "output": result.stdout}
    except subprocess.CalledProcessError as e:
        print(f"Error in /startContainer: {e.stderr}", file=sys.stderr)
        raise HTTPException(status_code=500, detail=f"Command failed: {e.stderr}")

@app.get("/system", dependencies=[Depends(verify_api_key)])
def get_system_info():
    try:
        with open("/etc/alpine-release") as f:
            version = f.read().strip()
        return {"alpine_version": version}
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="File not found. Press play on tape")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/resources", dependencies=[Depends(verify_api_key)])
def get_resources():
    mem = psutil.virtual_memory()
    disk = psutil.disk_usage("/")
    return {
        "memory": {"total": mem.total, "available": mem.available, "used": mem.used},
        "disk": {"total": disk.total, "used": disk.used, "free": disk.free},
        "cpu_count": psutil.cpu_count(logical=True),
    }

@app.get("/containers", dependencies=[Depends(verify_api_key)])
def get_containers():
    result = subprocess.run([BIN_PATH + '/getContainers'], capture_output=True, text=True)
    if result.returncode != 0:
        return Response(content='{"error": "Script failed"}', media_type="application/json", status_code=500)
    return Response(content=result.stdout, media_type="application/json")

@app.post("/container", dependencies=[Depends(verify_api_key)])
def upsert_container(container: ContainerModel):
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute("SELECT 1 FROM containers WHERE UUID = ?", (container.UUID,))
        exists = cursor.fetchone()
        if exists:
            cursor.execute("""
                UPDATE containers SET
                    location = ?, email = ?, expires = ?, tags = ?, env = ?
                WHERE UUID = ?
            """, (
                container.location, container.email, container.expires,
                container.tags, container.env, container.UUID
            ))
            operation = "update"
        else:
            cursor.execute("""
                INSERT INTO containers (UUID, location, email, expires, tags, env)
                VALUES (?, ?, ?, ?, ?, ?)
            """, (
                container.UUID, container.location, container.email,
                container.expires, container.tags, container.env
            ))
            operation = "insert"
        conn.commit()
        return {"status": "success", "operation": operation}
    except Exception as e:
        print(f"Error in /container: {e}", file=sys.stderr)
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        conn.close()

# ---------------------- Entry Point ----------------------
if __name__ == "__main__":
    print("Version 0.1")
    init_db()
    uvicorn.run(app, host="10.5.0.1", port=8888)

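A quick way to exercise the service once it is running; a sketch that assumes the 10.5.0.1:8888 address hard-coded above is reachable and that API_KEY holds the same value exported for the service. The uuid and email values are placeholders.

# Read-only endpoints
curl -s -H "X-API-Key: $API_KEY" http://10.5.0.1:8888/system
curl -s -H "X-API-Key: $API_KEY" http://10.5.0.1:8888/containers
# Start (or restart) a tenant container
curl -s -X POST -H "X-API-Key: $API_KEY" -H "Content-Type: application/json" \
  -d '{"uuid": "example-uuid", "email": "user@example.com"}' \
  http://10.5.0.1:8888/startContainer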
app/sbin/getContainers (new executable file, +61)

@@ -0,0 +1,61 @@
#!/bin/bash
# Collect running container IDs
container_ids=$(docker ps -q)
# Start JSON array
echo "["
first=true
for cid in $container_ids; do
    # Get basic info
    name=$(docker inspect --format '{{.Name}}' "$cid" | sed 's/^\/\(.*\)/\1/')
    image=$(docker inspect --format '{{.Config.Image}}' "$cid")
    container_id=$(docker inspect --format '{{.Id}}' "$cid")
    # RAM usage in bytes (from docker stats)
    mem_usage=$(docker stats --no-stream --format "{{.MemUsage}}" "$cid")
    # mem_usage looks like "12.34MiB / 1.944GiB" — extract the used part
    mem_used=$(echo "$mem_usage" | awk '{print $1}')
    # Convert mem_used to MiB as a number
    # Support KiB, MiB, GiB units
    ram_usage=$(echo "$mem_used" | awk '
    {
        val = substr($0, 1, length($0)-3)
        unit = substr($0, length($0)-2, 3)
        if (unit == "KiB") print val / 1024
        else if (unit == "MiB") print val
        else if (unit == "GiB") print val * 1024
        else print 0
    }')
    # Traefik labels (extract labels JSON)
    labels=$(docker inspect --format '{{json .Config.Labels}}' "$cid" | jq -r 'to_entries | map(select(.key | test("^traefik.*"))) | map({(.key): .value}) | add')
    # Comma between JSON objects
    if [ "$first" = false ]; then
        echo ","
    fi
    first=false
    # Output JSON object
    jq -n \
        --arg name "$name" \
        --arg id "$container_id" \
        --arg image "$image" \
        --argjson ram "$ram_usage" \
        --argjson tags "$labels" \
        '{
            name: $name,
            id: $id,
            image: $image,
            ram_mb: $ram,
            traefik_tags: $tags
        }'
done
# End JSON array
echo "]"

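Because the script prints a single JSON array on stdout, it composes directly with jq; for example (a sketch, using the install path implied by BIN_PATH in the API above):

# Pretty-print, or pull out name and RAM per running container
/4server/sbin/getContainers | jq .
/4server/sbin/getContainers | jq -r '.[] | "\(.name)  \(.ram_mb) MiB"'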
app/sbin/startContainer (new executable file, +52)

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
exec > /4server/data/startContainer.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') - startContainer $1"
CONTAINER_NAME="$1"
# Get the hostname of the machine
HOSTNAME=$(hostname)
mkdir -p /4server/data/${CONTAINER_NAME}/n8n
mkdir -p /4server/data/${CONTAINER_NAME}/data
sudo chmod 777 /4server/data/${CONTAINER_NAME}/n8n
sudo chmod 777 /4server/data/${CONTAINER_NAME}/data
# Stop the container if it exists
if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
    echo "$(date '+%Y-%m-%d %H:%M') - stopping existing container $CONTAINER_NAME"
    docker stop "$CONTAINER_NAME"
    docker rm "$CONTAINER_NAME"
fi
docker run -d \
    --name "$CONTAINER_NAME" \
    -p 5678 \
    --cap-add=SYS_ADMIN \
    --security-opt seccomp=unconfined \
    --restart=always \
    -e N8N_HOST="${CONTAINER_NAME}.od8n.com" \
    -e N8N_PORT=5678 \
    -e N8N_PROTOCOL=https \
    -e NODE_ENV=production \
    -e WEBHOOK_URL="https://${CONTAINER_NAME}.od8n.com/" \
    -e GENERIC_TIMEZONE="UTC-3" \
    -v "/4server/data/${CONTAINER_NAME}/n8n:/home/node/.n8n" \
    -v "/4server/data/${CONTAINER_NAME}/data:/data" \
    --label "traefik.enable=true" \
    --label "traefik.http.routers.${CONTAINER_NAME}.rule=Host(\`${CONTAINER_NAME}.od8n.com\`)" \
    --label "traefik.http.routers.${CONTAINER_NAME}.entrypoints=web,websecure" \
    --label "traefik.http.routers.${CONTAINER_NAME}.tls=true" \
    --label "traefik.http.routers.${CONTAINER_NAME}.tls.certresolver=production" \
    --label "traefik.http.services.${CONTAINER_NAME}.loadbalancer.server.port=5678" \
    --network docker-compose_4projects \
    n8nio/n8n:latest
echo "Done $1"

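After a run, the container and its Traefik labels can be checked with plain docker commands (a sketch; CONTAINER_NAME stands for whatever was passed as $1):

# Confirm the container is up and the router labels were applied
docker ps --filter "name=$CONTAINER_NAME" --format '{{.Names}}\t{{.Status}}'
docker inspect --format '{{json .Config.Labels}}' "$CONTAINER_NAME" \
  | jq 'with_entries(select(.key | startswith("traefik")))'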
app/templates/4server-api (new file, +25)

@@ -0,0 +1,25 @@
#!/sbin/openrc-run
name="4server-api"
description="4server API Service"
command="/4server/sbin/api"
command_args=""
pidfile="/run/${RC_SVCNAME}.pid"
command_background="yes"
if [ -f /etc/od8n ]; then
    . /etc/od8n
    export $(cut -d= -f1 /etc/od8n)
fi
output_log="/4server/data/api.log"
error_log="/4server/data/api.log"
depend() {
    need net
    use logger dns
    after firewall
}

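Since the template is an OpenRC service file, wiring it up is the usual copy-and-rc-update sequence; a sketch that assumes the file is installed to /etc/init.d unchanged (the source path below refers to the repo checkout).

# Install, enable at boot, and start the service
cp app/templates/4server-api /etc/init.d/4server-api
chmod +x /etc/init.d/4server-api
rc-update add 4server-api default
rc-service 4server-api start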

@@ -8,8 +8,8 @@ services:
      - POSTGRES_PASSWORD=deradmin
      - POSTGRES_USER=deradmin1
    volumes:
      - /OD8N/data/postgres:/var/lib/postgresql/data/
      - /OD8N/data/pg_backup/:/BACKUP/
      - /4server/data/postgres:/var/lib/postgresql/data/
      - /4server/data/pg_backup/:/BACKUP/
    networks:
      4projects:
        ipv4_address: 10.5.0.200
@@ -23,9 +23,9 @@ services:
      #- 8080:8080
    volumes:
      - /run/docker.sock:/run/docker.sock:ro
      - /OD8N/config/traefik/etc:/etc/traefik
      - /OD8N/data/traefik/certs:/certs
      - /OD8N/data/traefik/traefik-logs:/var/log/traefik
      - /4server/config/traefik/etc:/etc/traefik
      - /4server/data/traefik/certs:/certs
      - /4server/data/traefik/traefik-logs:/var/log/traefik
    networks:
      - 4projects
    restart: unless-stopped


@@ -2,27 +2,14 @@ global:
  checkNewVersion: false
  sendAnonymousUsage: false
# -- (Optional) Change Log Level and Format here...
#   - loglevels [DEBUG, INFO, WARNING, ERROR, CRITICAL]
#   - format [common, json, logfmt]
# log:
#   level: ERROR
#   format: common
#   filePath: /var/log/traefik/traefik.log
# -- (Optional) Enable Accesslog and change Format here...
#   - format [common, json, logfmt]
accesslog:
  # format: common
  filePath: /var/log/traefik/access.log
# -- (Optional) Enable API and Dashboard here, don't do in production
api:
  dashboard: true
  disableDashboardAd: true
  insecure: true
# -- Change EntryPoints here...
entryPoints:
  web:
    address: :80
@@ -97,5 +84,19 @@ http:
        trustForwardHeader: true
  routers:
    saopaulo-router:
      rule: "Host(`dev.local`)"
      service: saopaulo-service
      entryPoints:
        - websecure
      tls:
        certResolver: production
  services:
    saopaulo-service:
      loadBalancer:
        servers:
          - url: "http://10.5.0.1:8888"

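The new saopaulo-router forwards Host dev.local over the websecure entrypoint to the API at 10.5.0.1:8888, so a quick check from a machine where dev.local resolves to the Traefik host looks like this (a sketch; -k skips TLS verification in case the production resolver has not issued a cert for dev.local):

# Hit the API through the Traefik router; the Host must match the router rule
curl -sk -H "X-API-Key: $API_KEY" https://dev.local/system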
app/todo.txt (new file, +3)

@@ -0,0 +1,3 @@
create user
set bash as default user