Compare commits

...

30 Commits

Author  SHA1        Message             Date
Oliver  38873facc0  import db           2025-11-24 16:30:29 -03:00
Oliver  b3e7909c90  fix gitpath         2025-10-19 06:11:08 -03:00
Oliver  1808864021  fixes               2025-10-16 16:38:39 -03:00
Oliver  8ed294f3b7  works               2025-10-11 18:55:02 -03:00
Oliver  7953190b37  work                2025-10-11 05:43:29 -03:00
Oliver  f228ecb6cb  fixes               2025-10-10 17:08:49 -03:00
Oliver  ca2520727d  odoo fucking 17     2025-10-10 17:03:29 -03:00
Oliver  7e05162ed9  update              2025-10-10 14:38:52 -03:00
Oliver  5fe0fd6028  clean               2025-10-10 14:32:35 -03:00
Oliver  09c46d4a8c  add ODOO 18         2025-10-10 07:35:27 -03:00
Oliver  69c54bff7a  mumbai              2025-10-09 18:04:12 -03:00
Oliver  7faad3241a  working             2025-10-09 13:02:27 -03:00
Oliver  38907a46da  fix                 2025-10-03 08:49:35 -03:00
Oliver  dde5ec0eab  fixes backup slots  2025-09-30 16:22:45 -03:00
Oliver  bb271b8d71  working             2025-09-26 16:27:40 -03:00
Oliver  ac8f70d368  working             2025-09-25 14:55:30 -03:00
Oliver  b048ec1114  fix ID              2025-09-07 08:08:40 +02:00
Oliver  ac7a7793e7  new                 2025-09-07 07:57:12 +02:00
Oliver  80a5062126  Bad Gateway Nginx   2025-09-05 08:06:32 +02:00
Oliver  40775c6842  first spin          2025-09-04 19:51:20 +02:00
Oliver  f44758653a  a                   2025-09-04 17:13:55 +02:00
Oliver  f654fb1dd0  v                   2025-09-04 17:10:11 +02:00
Oliver  3c60293ac6  working             2025-09-04 16:38:30 +02:00
Oliver  0ea18322be  fixes               2025-09-02 08:54:21 +02:00
Oliver  7d0f1967b3  working             2025-08-30 10:29:36 +02:00
Oliver  86acea94b9  working traefik     2025-08-30 09:53:31 +02:00
Oliver  9726dc0060  certs               2025-08-25 07:10:14 +02:00
Oliver  d45114c4dc  no passwd           2025-08-25 06:01:18 +02:00
Oliver  c373588b5b  working             2025-08-25 05:52:55 +02:00
Oliver  29a9892ca6  fixes               2025-08-24 20:24:37 +02:00
89 changed files with 1954 additions and 222 deletions

1
.gitignore vendored

@@ -1 +1,2 @@
 tmp/
+exchange/


@@ -14,4 +14,4 @@ set_prod() {
 export host_vars_dir="/app/host_vars/vault/"
 echo "LIVE MODE ENABLED !!! "
 }
+clear


@@ -14,7 +14,8 @@ RUN apk add --no-cache \
 mc \
 e2fsprogs \
 screen \
-rsync
+rsync \
+device-mapper

 WORKDIR /root

@@ -30,7 +31,6 @@ RUN ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N "" && \
 COPY rex /usr/bin/
 COPY template /usr/bin/
-COPY dpush /usr/bin/
 COPY create_volume /usr/bin/
 COPY mount_volume /usr/bin/


@@ -1,25 +1,35 @@
-#Host dev
-# Hostname dev
-# User oliver
-# Port 2222
 Host dev
 Hostname dev
-User oliver
+User 4server
 Port 2222
+IdentityFile /app/host_vars/dev/dev

-Host saopaulo
-Hostname saopaulo
-User ansible
-IdentityFile /mnt/encrypted_volume/.ssh/saopaulo
-
-Host mumbai
-Hostname mumbai
-User ansible
-IdentityFile /mnt/encrypted_volume/.ssh/mumbai
-
-Host london
-Hostname london
-User ansible
-IdentityFile /mnt/encrypted_volume/.ssh/london
+Host manchester
+Hostname 192.168.9.20
+User 4server
+IdentityFile /app/host_vars/manchester/manchester

 Host boston
-Hostname boston
-User ansible
-IdentityFile /mnt/encrypted_volume/.ssh/boston
+Hostname 192.168.9.16
+User 4server
+IdentityFile /app/host_vars/boston/boston
+
+Host mumbai
+Hostname 192.168.9.17
+User 4server
+IdentityFile /app/host_vars/mumbai/mumbai
+
+Host meppel
+Hostname 192.168.9.21
+User 4server
+IdentityFile /app/host_vars/meppel/meppel
+
+Host saopaulo
+Hostname 192.168.9.11
+User 4server
+IdentityFile /app/host_vars/saopaulo/saopaulo


@@ -1,17 +1,16 @@
192.168.9.15 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFs8k+3o351E6Plim8S3xjog13YOERnOkBjwIZPUf7Nr 192.168.9.17 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINmL6mEsSSpEWVICaXSFOd/Z1i6S5zn+B2q/ZKlqLocb
192.168.9.15 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCrRIMP8pjQPX+ez16gVA92ekCIJ0r7i698cHJfYr2ZWv7GHGjSUIhJ3v50g592cDKJxwmVhBcLhQmvsf1Z8J+dsTi6X2n+bljTEnyp9Vznk6GoBeia7eXV/MJHHLcQ9e7ewx+8JfsAJ6qkmnbZ3wjPKPO9Oxdh8hjGc4Zqq9vD9WwmpcqjJyigIo/C8dh20QLk0ZegtaqxTn5SRbKJH+5lPitTeF+d/rD/j6ifr7SJzUzRVCgLdEA+1wZsqZImxQzO+Xm7R6qLsQrN3Th2OvbcD9LINDor6Xl1CaEGC4QsV++j7Vwj6Ljw5EYoPztfhcpSthzGJ84X7YE/ewtrm+mOEtvyFpz5JwPl5bAvj1A29V4juzHntuXUXYAJSkORchHVFDY6cadBNhDAXtTMg+UeCC2umt72DJW1qJvig35D65ipkz22+HVavF8fo6w5M+YkSsFCwBMn/XghBJ346myT6WN9UfH93RkIocloeehF3JQ4m9IS/xqSsrIYjjjLTr8= 192.168.9.17 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDhrtXNi+1udDp34CDDmMWNqUTfsn7F6XVv4xtGSmaUbREt0A56RIqo6EMegLSN+9J2z+x85ZxxNRoQc2kAamB9/6v4poDyMJKhqDJ4qimyboOSPI1FqAojmeSCGXmSU6bk5lYDKbkQsINeaGiQ86inv8Rgar4EV834Sp1HEGUZBksn7+xPL4MkmMWfcykrELgU8dnsJYiRJKWxU2d4UTWorBWtcICg1SJndB2QBncYT3dOEl+283HDBnKU6w2o7OhP+Tz+vTyVxtqZ7QF9Z/e7et9zFXnjsTopnkCVBFt8/esKPmgesiY+j0COx9aaMD6Ks2ssa8UqYHybmHwO8o5vfvcCR3ofZhhkGRnAXijd1D6bK3uXJR13PYnlPjiNQA0Lvf2YrOMDBDmRk5j8zQRHXe7oygmYz49z+lrR83VBIFVJkTt/GELQJ6kjQdSHdRinL54sVPl407cdmzBdwenY6vBCcN2UIDu4ohubqr0QH//PaXbtj4pEEExzurdq99U=
192.168.9.15 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN2wcMkqDNPkNR4YIKrbG1nVmRlVySBy1jZQBs78wTglW5Tw9+R3io4K24yaQ0/OVtA4aTw/mC1RJntF+ZEMhWU= 192.168.9.17 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJPErwjui8jQC3Tkql+G6PsDd16pe7wm2vEhs/qYaEGSM+L7lW7XiXVMPhpCd4UrRVFFibjyfZsxGCv8LmctqIs=
192.168.9.17 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE2xvM7EltHoRzAqqdHURdWxQeSC1kh4ZjxOmuJrx5B4 192.168.9.20 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG9kTGcpJCCoUDyPmhWx9hWTLhXr1SqpYFSW50l1zFsa
192.168.9.17 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDcaJKryDNGyg3GE99pA2FmWrRbf8e7DELfkK/UnpMfoVInWbr8nEhsyxmzda7wiO9rwYgtlWLhckIs6RIrE2LStCuvXODqKNSx4I7n8QmDHgVLTKP4JEbf6V7g4BKFjH153KUTWfZ++wCXkDExXGdcMvT33pVWsoyIRc/3RQRECcByoMp8mddPAibrrwztuB/L/WxHhCgIo1F3YCZzDVKu/s71KVAK82aiyDsVn0GIVCw6locFBUena4nlJP+A4O/9YS+8PhSjkqwI9XbAB50EA9vlsG1xk77NzltgADIsLvh+hKhVGMMWXEXl2UennyJGUxiu9ASHM6nqLjEFp9F0cTNZ5+8zOuERj488qsXj0DTMFADd4fsPbWxixuJIb7PS0X09/0VmQerYEbd78R1s8LCZ5dm9MO5w439YIN8Uy0jChOAIcJ7GawRJTTfWCYzyxImHxoF8HjxHzljtRax5zWo4aqbL+yXiIMIeCfqliiV6QvINWE3maP++1I9zdtk= 192.168.9.20 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCoTci1WePcd1dFfZbS6C6utZ0ni9aGuvTc1YpORg1fnEF8IZEB9FzEb4vGIEMqyYZq9HsVXJdcBTbDMtAqYlx3IqIMRsS8zFnnlqM+u0OKgt4WLRDfvKY3eGNilm3gplNBwdnh0dHafRy+CSHMvC/fcnkjFFfj8nQ8rdOoE/qPd1gH6l0+mcGWpvH78dK3eU03M964aAxzrBqUhU3y6lYSinI0EZHf6mniOSVBJ27ulImisb+z14row2HgPvjnIbp89QOP9b7b0lzVQ5VdmikAMVtsbKARyZi7Tn1ZrTZqGBx5SnuGKxG0Wl3gLBAL5pTHV7XrmCUJOkUdnoTNCxjsV6Qo6SBiKHYqwJPGry4MbT2bOdFIkw5YVJKEU+Q6txMy1axcZ2ACtcWRiUaMHoVsfbFiQv/Llgc7fsmD8NCMNArD80N4YTD95Di+Ch8FZ0Cf5sfV/6NnvER2hivr+9+Q84UVwA1RyhGrPPS9emMixIod5UgtzO9vmJcpJrmiCLk=
192.168.9.17 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGyCXRdNzmq3Bu9HectNVM/Gn2E2/nC/y0EtIn+WIaW7At0eUbf+v8qjk4XqwhwR1/eUttXQS3841a7HixUAbsI= 192.168.9.20 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLBKg3hH/hdu4o51Ubg831HsUEedZp0VNAtpmY5QN9RTOJJaYxUJBo3ZbyrLMEWCvVTAT4nfwUAJhaf78l8Lk60=
192.168.9.11 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNB6+lwnOHdZ8tqkU6jWqCnA6SOK03CpDCJek+XVtsA 192.168.9.16 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKSSlK1dlG8Qszv7sy0DAuNu3bsH7c1eTfFOL3OcsAyE
192.168.9.11 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+FmXjANsPJZPEySCTc9uOf2qPXCheeUcuAfL6+KXJ2WVuQFituDfqLC9BvOTprweu1SV4SLEMq37qSVUkMJhBRWSgA7/Sy1EHaWb0IWxTZamhjjjzhORTKhEL6n4gopoRP1ZzytjRnh7S4JNbDukQ5v0nrjMsgkp5yTNbHJOJ2IwQMdi3ajkYLyDgkqQRIQGiBf7hvQIjWnwiQ3TYor29SwAr3fCddk70aIETBoNI/YaaFGGi7eMy9WqwFsSUeqecuSBr21R5S9bfw7CFheQAOTsQ9XCQiPerGdxZtChiAXiGoAonSUeo8jJCcDjpUD1lkLRbov94Red131yFuZcdZroKifd8aByPVVA3RPSe7GmEhBXSc8QHjpXcUoAz7MqRXt9SBdaMcX3DBrVl3Y7PgVymGkuvhCy1Xh2iq54/y8OFQLMVBn9Euy5r4DlXV8xYX1Z3JYGpZV4WFcqU0eRR3e+njcL7UiK83ob35Bfgbu4MOUoz3qbdmEfSQJjSvXc= 192.168.9.16 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXNdN2lYWg9k3r5B9QISWCAWvAzzDj/aO5EeYPimDcz383Qliq0xW+FjJvgw2fY/Aoljwd6pGY3pJPN1aM3i4Qxt22S6emsFkGm89/GOdEu9ZkB+ln0fL8uPJTdjQklkhGb8YIF4aD8OMkMNXn7Ale+TiJJuWWmh7UAQITM5EqX1Wq+uAM/2ixpKo3dQU8L6pzeGi5yQUzHB3eyckSnFjCrSS/hW6lsKyhoaSYn+cAiuhbWEwP4lv9um6Cl2rLZYcysJTTs1DP7gJL3eyytnQ83R8MEX5/qY2zSVFCaNPguF7hlC7MgdR90naqlzm40XzYXHloDIg92+kGZGdR6jcKy7QasHuQLeXXJEk63jl64IJwRJPNnrVGszEDwIz0nBzVX/Yot2R/uAjhJRdR5d9tlZbMILkng6I/BVdJhqnJzuBottZpfu0qaBakIecDbLiJeU+AbAQffzWesyAmlHaLLLCHgwLdNaVjCltx6/RebF8HRRO82sdRE8Js6dSAXZ0=
192.168.9.11 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKd+ohYp93wWzXTcLcRVL5TQRZ3NJGZQAqrqRdL6wQ4bkEUE0JkOgf9YOuwvZeYIp9uGlCZWa8IZmMaBUFXQtbM= 192.168.9.16 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLpXVmQVbrhdfbrTHINQ2lZCxLxgrWbIDoNoxRR7EuqI5qNr2LQhqmTVZpNUIvj7PhdGPN4hry9jMfC39Dwa7Eo=
192.168.9.16 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOSV2fWgIzg8xOcb6qmmxfLMXpGFbT6j2w7GC6JwGCah
192.168.9.16 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCLzyfVrbCNJW9tNTkxMy8IDLGyQoqkxlGN+TirLoNyw5GD1XpJgvHIBy//6tDEIuAfL5ouqp0T+PM00nSj6gEjGeTQtjrzAeSXJ5D3Q3+Vl92j1uSPPhnChRnidI2mrjvNuaukaDxBMwIvr9sagTvtZwCupKbuYeW0kQqCwnSIjTS7OwcrCRm3RtzC8bOPpOIHYaLxV6OnyoRXKLv9Lu987nuNoTcZu61jyCHhFTzzN3Y3KRpioshUoHiukgJbJQVnRuVkxQRknWa5gS4ATLLUG70taTs0Ld4Canyrym4aVV9pw5rDwH0rkidvkz8OLX049mIeGhv/QPRH8x5bgtPezKkiOcPW22HG0+z+7zHgbkTKlcvGr1JdhkTN6jqL4HyYdhMj04v7miv8LnxjaBNb3iNFQiAs5aV5LmPevYP+/94TGBIlTVCh5oTyeDJ1OKmZKL9ZUUfu0Ozz+v3O3en4hNMOI8P+uujfIxfvn/Pasxh4r7pTGYQV9nseCmrsl0k=
192.168.9.16 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLtNCuNwiidC4IdZci3J0s/vbcTtFZod47BKd8GKyGrucOS402fuZ03F1elrEKjA0PcFNZQR7MbEvF6zDjEfoPY=
[dev]:2222 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILf8g96UGmWCLf5C5qfGgir3ned83s5HXNpM231A0rUu
[dev]:2222 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC45Aw7jQXh3/QOMOCkM4wvz4/yXxpjQU5C2ZMJ/76lTPB/DsvP9C+DGKqO3zzznOhdaYzTb5lipMguWRyH0RYlwYtR7TnYKTZgioXC/+kSdVzgAUeqcUCUIoNH5CSp/lgsAp1XxQTPk3ooZJc57cTnHo78Y+oOj7bF8u8Nin/q+uTTrgXVkbMCZYSX+CA/fHtmatH9vMdAbPv/Bx9M72vijMMZWOP/6hwRhn3C0s2o1rgz/nXe5u3HYIodwzItqdceF3k6hLuZP8Bb+kKz42wXstke5SXivnUm3uDdkLNKjKWnSWoD7TAIA9w6MuFbLBLL6QRBmfcx74F6aeuV55ItzRPVAz0XEqQU55r3nS9r7zLyT0HGvu4Wt+Zf97nHKfWucaf/5UFWoTC9pijYEYbNyhYockyzM+QAx3/n6cfwIjlc/wFkU8nd7fUjfkymTlIX6UiT/12b+TrFuOLBk4mylDGkPge7zfslvH/LOyokzCRsz75Sh3bc7bxB1ppPrjc=
[dev]:2222 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK+WCqjyl6CKaDnMAcfqGYty8KoWqh95T0lQK3RFcNfHom4rsZljS2tPlicfRSHT/2zWH7HfQ9T2t/2PYjWjxrc=
192.168.9.11 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFbAGYglyMYT/IfX9G4n9GhbPf+8T7TyVin/DfP3eKgb
192.168.9.11 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCthnkPJKC1Ij/Fc4fTRubcV8xDSKJ4UrlUYgZ+lO2AZYKhefVZOaDkizqWbjf9c4dqAAEX1lqgDwr3UwEw6hukLLhherCU6tn3Q/nmOQsbKrEGG+kJXnzCi1jYsH4UNQB8Xl18eaLhGZosqCfEVK3LHw2/nMsxLfbrw61qf9Z7/haQOJU2M2W9X3U/yEk4sOCoxguJIoNJ0JZB+X3xO2TXKLCKuaWpckbmHAVuL0c4u0ZfYqF3lCxWLj/iI5SKD/YqajohaVBaoJkmtIkLuuSSFe5gJb22htMgKXoQ0TcxxAvTnHwa92sVSMx7Lp1zQ5RGzN+Dw99FL4HyR4KDPlmVpGPZbNp0LnnggQ0SBmT6F3KarNZLzSW6w2foBkl9XjTEBUjb5cHXK8jq7nEUK26TgirngSAFg7y44NWta6whRX0+/wCanlvgQ1BNYGf4vcw5P+Q8MoGOdepF7b8gQiUn/KhtmnQem/Q1//xDt9MOJkmjBemG9KIso/gqtJkHeps=
192.168.9.11 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDc6J024++J25z2ZMXQ3Vim+mLVIVs+cZngv8DmtEDfrT/Ptl7+F2IqXRNcZuq1YTjEZO8eQg27iIemPoPT3xa8=
192.168.9.21 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMFuf87ngDaF8D0kzVzzB953ji6ptg/V9t+WPac+DAjO
192.168.9.21 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDtexcutExMflSRPJOwGPuZx49c+ZUMoElNPNntXONOuVZ8jM+zzA/4s+HCj+RWdHMnarLcNJjwRZRq7JWruqmr1EN9eNmvu13cLCKZr6Ape1gt8VmMhvQ9nA7eMx3UdvnWgXjLCfJy+BKh2XVAneM5lOSPMcyQlcACrLQVkse9EKhxhV5nEjXKYxSNetIPK9PxglXznpC8IpuWlXnDH2R8vDhdvBmBFTKOjN5Wa+GrMfElWeQOjyCpNcMVYohvV87VN6FxHQTADFTm2CKy/W7pnM8rI55ZbBIMfvLyhpM+vtFh1sJlIZSFnn+ytMUImNEAgBDI3fCpwZ99zeLViGhQJlBkdCUI+JJK9XJjpcOMNydeWh142RGU6juPuSLOZPqKNc3BpZYgPY0vMDgDWgT9AcYup3rAJ/UnUnhsxiT2h/gx7+t/j9xg01BQF5S6mezaueuavHCh9MvAzXiJXR8zanhl/Hh9BKgIAFrZh71CzP/PYG33BKCe9DXA09aJrf8=
192.168.9.21 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGlzLZWNjSaDuDd4e66LoBizY5j+QCsifIIxkvX4CrzP/AqAgWDEEgT+pAXBFkNJlBR6TDFJ0bIdwZTbcq8/72E=


@@ -4,5 +4,5 @@ cmd="$*"
 echo "Running on hosts: $cmd"
-pssh -h "$hosts_file" -t 0 "$cmd"
+pssh -h /app/host_vars/hosts -t 0 "$cmd"


@@ -5,37 +5,51 @@ if [ "$#" -ne 2 ]; then
exit 1 exit 1
fi fi
host_vars_dir="/app/host_vars"
keys=(NEBULA_CA API_KEY HOSTNAME NEBULA_CRT NEBULA_KEY SSH_PRIVATE SSH_PUBLIC)
NEBULA_CA=$(<"$host_vars_dir/ca.crt")
localfile="$1" localfile="$1"
remotefile="$2" remotefile="$2"
remotetmp="/var/tmp/4server" remotetmp_base="/var/tmp/4server"
# Read hosts from file descriptor 3 to prevent ssh from consuming stdin
while read -r host <&3; do
while read -r host; do host_env_file="$host_vars_dir/$host/$host.env"
echo "Processing host: $host"
host_env_file="$host_vars_dir/$host"
if [ ! -f "$host_env_file" ]; then if [ ! -f "$host_env_file" ]; then
echo "Warning: env file for host '$host' not found at $host_env_file. Skipping." echo "Warning: env file for host '$host' not found at $host_env_file. Skipping."
continue continue
fi fi
declare -A vars=() # Load host environment variables (supports multi-line)
while IFS='=' read -r key value; do set -a
[[ -z "$key" || -z "$value" ]] && continue source "$host_env_file"
vars["$key"]="$value" set +a
done < "$host_env_file"
content=$(cat "$localfile") NEBULA_KEY=$(<"$host_vars_dir/$host/$host.key")
NEBULA_CRT=$(<"$host_vars_dir/$host/$host.crt")
for key in "${!vars[@]}"; do SSH_PRIVATE=$(<"$host_vars_dir/$host/$host")
content=$(echo "$content" | sed "s|{$key}|${vars[$key]}|g") SSH_PUBLIC=$(<"$host_vars_dir/$host/$host.pub")
content=$(<"$localfile")
for key in "${keys[@]}"; do
value="${!key}" # indirect reference
# Replace placeholder {{KEY}} with value using Bash's parameter expansion
content="${content//\{\{$key\}\}/$value}"
done done
# Copy content to remote temporary file
remotetmp="${remotetmp_base}_${host}"
echo "Copying to $host:$remotefile" echo "Copying to $host:$remotefile"
echo "$content" | ssh "$host" "cat > $remotetmp" echo "$content" | ssh "$host" "cat > '$remotetmp'"
rex doas mv $remotetmp $remotefile
done < "$hosts_file" # Move temporary file to final location with doas
ssh "$host" "doas mv '$remotetmp' '$remotefile'"
done 3< /app/host_vars/hosts

9
app/README.md Normal file

@@ -0,0 +1,9 @@
aaa-bbb-UUID
aaa = server
001 = manchester
002 = boston
bbb = image
001 = n8n
002 = ODOO_18
003 = ODOO_19
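For illustration, a minimal sketch of how an identifier built from this scheme can be taken apart in shell; the dispatch scripts later in this diff (backupContainer, nukeContainer) use the same `cut -d'-' -f2` idea on the second field. The snippet and the example id below are hypothetical, not part of the repository.

id="001-003-123e4567-e89b-12d3-a456-426614174000"   # hypothetical aaa-bbb-UUID value
server_code=$(echo "$id" | cut -d'-' -f1)           # aaa -> server (001 = manchester, 002 = boston)
image_code=$(echo "$id" | cut -d'-' -f2)            # bbb -> image
case "$image_code" in
  001) echo "image: n8n" ;;
  002) echo "image: ODOO_18" ;;
  003) echo "image: ODOO_19" ;;
  *)   echo "unknown image code: $image_code" ;;
esac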

6
app/download_sbin Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
rsync -avz --progress -e ssh mumbai:/4server/sbin/* /app/sbin/


@@ -12,4 +12,4 @@ set_prod() {
 export HOSTS_FILE="/app/hosts.all"
 echo "HOSTS_FILE set to: $HOSTS_FILE"
 }
+cd /4server


@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDBzCCAe8CFA4xhYG29I1JGz2K+LN79to0c5HrMA0GCSqGSIb3DQEBCwUAMEAx
PjA8BgNVBAMMNTAwMS0wMDEtMTIzZTQ1NjctZTg5Yi0xMmQzLWE0NTYtNDI2NjE0
MTc0MDAwLm9kOG4uY29tMB4XDTI1MDgyNTA0Mjk1OVoXDTI2MDgyNTA0Mjk1OVow
QDE+MDwGA1UEAww1MDAxLTAwMS0xMjNlNDU2Ny1lODliLTEyZDMtYTQ1Ni00MjY2
MTQxNzQwMDAub2Q4bi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDPRqge1j5i2Mtygzh4Ep3S4WAVRUbygFgliBslkrO4d4mxusOLhzBn5JJY+NWA
pNsvnojMcGlgeIugsi3MeMn2/ay88Y5THPrHXqf4jTJB8DvlYbi41HfBX6rstF4z
2IZ4gIp6aem6wVuIcI6DKlPlEQss09aFkTrp4jKvPPCq3tgbcI4PkHvHm4fpzIjW
5I8JrQBqBHrNmYQfUT+ZEABKj0XQMH+CceNOIw18ChKoHIJbIpqAKO0zMYiQ6fCZ
y4OWJCHk7ekXNdNPjt2K1lh1doNK6gPxjsIuh5Pxd+BANoumqMCFbLNs8bdwO7p7
Po5uFbU5RB4L5KSoxYPgmFZ5AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAAD/1Wxi
xzOkSNHG3Fu50q0m4qxEEoNYvxFkB2EUSA5DKGeCQq46tKpjNR+zZzG2fbn711v9
qSNuZEhxajQkQ0oR7CbMDs8Ql/WlregdZTv053liBHFkpwYVRSaaE2LnxvnlbmMq
eNIlPldmN/b7Rs07e5GcIkZ8mMQrbT2TuQV4Q7be8qjex8zF7OzLF1ok//C2SMwF
0qd5Z9e4rycI2XleE73Y/Vdl1vO+/RbZDfhhp12gYi/A+jywMYgckVEjUKQ2/9FT
UIaDtDEh9oYirBEyqmJPsZi2nqt1UJKDctqGykmZZwymAtfYPnPNVWYgp7nU210c
hc8zQDaZyEZUEYw=
-----END CERTIFICATE-----


@@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIChTCCAW0CAQAwQDE+MDwGA1UEAww1MDAxLTAwMS0xMjNlNDU2Ny1lODliLTEy
ZDMtYTQ1Ni00MjY2MTQxNzQwMDAub2Q4bi5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQDPRqge1j5i2Mtygzh4Ep3S4WAVRUbygFgliBslkrO4d4mx
usOLhzBn5JJY+NWApNsvnojMcGlgeIugsi3MeMn2/ay88Y5THPrHXqf4jTJB8Dvl
Ybi41HfBX6rstF4z2IZ4gIp6aem6wVuIcI6DKlPlEQss09aFkTrp4jKvPPCq3tgb
cI4PkHvHm4fpzIjW5I8JrQBqBHrNmYQfUT+ZEABKj0XQMH+CceNOIw18ChKoHIJb
IpqAKO0zMYiQ6fCZy4OWJCHk7ekXNdNPjt2K1lh1doNK6gPxjsIuh5Pxd+BANoum
qMCFbLNs8bdwO7p7Po5uFbU5RB4L5KSoxYPgmFZ5AgMBAAGgADANBgkqhkiG9w0B
AQsFAAOCAQEAktJBSt2lSbrsmUlhG+6AZ4lo52qOwmxxTQ7steEfkMp6zOvO3FXk
meOiU59fFyOkH0pUpJo4RolZPfSyzdi0R9fV5wR/a1eqaiNyzReTPyyXKP2SMdzu
Xav3ldaMOGp6gPa3qmyQ6nQJjVJWj/FulCslIAv55Qk2xMlRQYV+IIK4Gggl74d9
Kwbq3MDvMeLJS5Qzr/hHqWmPWiUKbs1DbTajSe63B36/yMEi8VYjdWw7K86kS2X/
0yy1M9+HOHPG1Ch5zHaa64iioo1iaMxTqBgOvJTsuCtNX5oflj56STBozNvzMq1o
0/E9/uxW1TDL8iDYp/k2krT4k1M0rKq6Tg==
-----END CERTIFICATE REQUEST-----


@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDPRqge1j5i2Mty
gzh4Ep3S4WAVRUbygFgliBslkrO4d4mxusOLhzBn5JJY+NWApNsvnojMcGlgeIug
si3MeMn2/ay88Y5THPrHXqf4jTJB8DvlYbi41HfBX6rstF4z2IZ4gIp6aem6wVuI
cI6DKlPlEQss09aFkTrp4jKvPPCq3tgbcI4PkHvHm4fpzIjW5I8JrQBqBHrNmYQf
UT+ZEABKj0XQMH+CceNOIw18ChKoHIJbIpqAKO0zMYiQ6fCZy4OWJCHk7ekXNdNP
jt2K1lh1doNK6gPxjsIuh5Pxd+BANoumqMCFbLNs8bdwO7p7Po5uFbU5RB4L5KSo
xYPgmFZ5AgMBAAECggEAXDMx1YW3eoC0/tsf40lkqslV1CBczsIIc6l3ad830nZZ
6ZUKJqacAZrK/oixb+flF+mNMGNQfHkiovifJrUUIan1jJZmmNHrO4P/c7BbCrmx
6vbtFEpuerXzchdJUAagyjljX9B9B3W7IZXvzqilaN+L+QTCB+fyLNdRdGHHLDnn
HflKCPVQS/PWTBLufxtmmZ59uqLkWzOio8d710qrxPankejXb2kvuDjWuWu9Rdja
lxR8IdZaG32hPhrkFL7fxlQjLuRsPIP0GMaQeEOOvyTi2e5BBCE1CiCxT78e8cIP
eae42jNHelhkj/FA6vLcQwPAis1TZ8e3+GkUt357gQKBgQDtxaBm8PT6a+R9GNzs
c5gCWwm21YUqzbCl4Q/8Mnyn2wqQn+/RmfMwr1uNLAiGkiB1JNjbO0pzoeJyk03F
vDxd/QyicSfYizndj+XIEmLRyUwixXZzpqoJLYHoiwrl6f/vVQfDLgUJYbMFHzQW
xuuLGhqFjbLgKlVwGEd0O2QiawKBgQDfKoi0upLabWHgYmsFm7HxqUECi3/iJDrx
JW/qDOBQtgfUTi01JFg0/xctQRdERh+8bBwOCWnkMdaPtJTXjJo1AgO+XzF0BB1B
FooXcqe7eokPUeBb3P/CdXNNrfBcisDbTX2za0AG8ETja4jBhliR45O+S58G5hku
8W59g+BLqwKBgChQoenSYTc0pAEx/gN5dgSwOu1tNq8TQShfCL7SMKClWx06gQcg
+0L9+J/vH2Lx098I6FwDqZQBlsumfkFQsUueZE4GsaLduGoAxA0wUOERKH+cy4DA
eYQk2yn6qVZiXqrN2AsX+nKkxh3QNJzIDZgATQ7n/7RSeToQY80pZMkRAoGAe4s3
fR3gmI1/ZtH1P3iPDSLO+5KwrEe0XbWE/EQ+lk//i5fvzQCe2E/zy7jCIajUfuI2
scqiVZMFni6xS5bp87h2zBg0724rp9HLhumRU+elItcH5rM037lXqMRHUWP7Gi0P
DpmsK2suJ9xrK/+s3q7nJq0Ej7QocuVzboboT9sCgYEA2aaRafzP6v/quJwc33RZ
0PFxPXDKV9MuXjuve+7d37iSo3A/1h8/hdwg48r6OnQUCtLOvkZ1t21UKlbGY6F9
qw8VkMxmCPgIZHqLfOT7TNsHoF8eZAN6HMGkI/+SzpYpaA3NcB0tajJk8+8PHJI6
Rg/Hkv4zpzmiPDN7F6l89rQ=
-----END PRIVATE KEY-----

6
app/etc/traefik/certs/generate Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
openssl genrsa -out $1.key 2048
openssl req -new -key $1.key -out $1.csr -subj "/CN=$1"
openssl x509 -req -days 365 -in $1.csr -signkey $1.key -out $1.crt
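As a hedged usage sketch: the script takes one common-name argument and writes `<name>.key`, `<name>.csr`, and `<name>.crt` into the current directory. The domain below is only an example, not one of the real certificates in this commit.

./generate 001-001-example.od8n.com
openssl x509 -in 001-001-example.od8n.com.crt -noout -subject -dates   # inspect the self-signed result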

12
app/hardening Executable file

@@ -0,0 +1,12 @@
#!/bin/bash
# DISABLE ROOT and PASSWORD LOGIN
rex "doas sh -c 'grep -q \"permit nopass 4server as root\" /etc/doas.d/4server.conf 2>/dev/null || echo \"permit nopass 4server as root\" | tee -a /etc/doas.d/4server.conf > /dev/null'"
rex "doas sh -c 'sed -i \"s/^#\?PasswordAuthentication.*/PasswordAuthentication no/\" /etc/ssh/sshd_config'"
rex "doas sh -c 'sed -i \"s/^#\?PasswordAuthentication.*/PasswordAuthentication no/\" /etc/ssh/sshd_config.d/50-cloud-init.conf'"
rex "doas sed -ri 's/^\s*#?\s*PermitRootLogin\s+.*/PermitRootLogin no/' /etc/ssh/sshd_config"
rex "doas sh -c 'echo \"PermitRootLogin no\" >> /etc/ssh/sshd_config.d/50-cloud-init.conf'"
rex doas apk del linux-virt
rex doas rc-service sshd restart
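A possible spot check after the hardening run, assuming `rex` still fans commands out over the active hosts file as shown elsewhere in this diff; `sshd -T` prints the effective server configuration, so both hardened options can be verified without reconnecting as root. This is a sketch, not part of the repository.

rex "doas sshd -T | grep -E '^(passwordauthentication|permitrootlogin)'"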

5
app/host_vars/ca.crt Normal file

@@ -0,0 +1,5 @@
-----BEGIN NEBULA CERTIFICATE-----
Cj8KDU9ET080cHJvamVjdHMoqNOhvgYwqKTJogg6IDv7w4DxfOvLDJ6WgjE3V8MZ
k1I6t5GjmBmnyd0Wf0UqQAESQAzBFnjUsemshOlFCJisKbXdqBR83/Fl5aS0xSQj
ZcDIpmgPnslBHTo8oPJLWeuU0Qd9IHNfdQvam2j6YnzVQAE=
-----END NEBULA CERTIFICATE-----

26
app/host_vars/create Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
# Check for argument
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <key_name>"
exit 1
fi
key_name="$1"
target_dir="./$key_name"
# Create directory if it doesn't exist
mkdir -p "$target_dir"
# Full paths for private and public keys
private_key="$target_dir/$key_name"
public_key="$target_dir/$key_name.pub"
# Generate Ed25519 key without passphrase
ssh-keygen -t ed25519 -f "$private_key" -N "" -q
# Confirm creation
echo "SSH key pair created:"
echo "Private key: $private_key"
echo "Public key : $public_key"


@@ -1,4 +0,0 @@
API_KEY=4h6lDzAOVksuCqmhEB3
hostname="dev"
nebula_key="123"
nebula_cert="456"

7
app/host_vars/dev/dev Normal file

@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACBH5vr90RFgOJrP2Xjr5I5QBxlonCC7pce56JDJFboPXQAAAJh9gvJGfYLy
RgAAAAtzc2gtZWQyNTUxOQAAACBH5vr90RFgOJrP2Xjr5I5QBxlonCC7pce56JDJFboPXQ
AAAEBSerxP83/u4p/IobVSxko5ZXO+/PPczGW0kopTfLLAykfm+v3REWA4ms/ZeOvkjlAH
GWicILulx7nokMkVug9dAAAAEXJvb3RAMmNjNzhkYzBhZDIwAQIDBA==
-----END OPENSSH PRIVATE KEY-----


@@ -0,0 +1,6 @@
-----BEGIN NEBULA CERTIFICATE-----
CmYKA2RldhIKk5KghQyA/v//DyIDYmVlKI+YrcUGMKekyaIIOiAXY9FKiA1V6ayD
Vx9Ce9UK3YcCF93DNP68WPixdl9LZUognXOojuxdSXZ4IG4v3A8HJ/77YSYnV/il
ywmZ6V2khEESQHUVytAPARrJ0KxKPolUot6cl+UNMo5HOMqg2kxiRZBIUTp5XIME
WfrYcdjlS9af7I34439r6gs4bA2LDGaaMQs=
-----END NEBULA CERTIFICATE-----


@@ -0,0 +1,2 @@
API_KEY=your-secret-api-key
HOSTNAME="dev"


@@ -0,0 +1,3 @@
-----BEGIN NEBULA X25519 PRIVATE KEY-----
96/m6SrUsGWzT6atNvnopzygGhIAaXbBCXT8KAvwKp8=
-----END NEBULA X25519 PRIVATE KEY-----


@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEfm+v3REWA4ms/ZeOvkjlAHGWicILulx7nokMkVug9d root@2cc78dc0ad20

1
app/host_vars/hosts Normal file

@@ -0,0 +1 @@
meppel


@@ -1 +0,0 @@
API_KEY=4lnZRkRB7ke0A2zkX0T


@@ -1 +0,0 @@
API_KEY=4h6lDzAOVksuCqmhEB3


@@ -1 +0,0 @@
API_KEY=4SSJxWKmuwblhzd3F5L


@@ -1 +0,0 @@
API_KEY=7WxFrFAvQjVIJF1sLzl


@@ -1,4 +0,0 @@
saopaulo
mumbai
boston
london


@@ -1 +0,0 @@
dev

5
app/migrate Executable file

@@ -0,0 +1,5 @@
#!/bin/bash
rex "doas sqlite3 /4server/data/contracts.db <<EOF
UPDATE containers
SET affiliate = '{\"utm_source\":\"OD8N\",\"utm_medium\":\"direct\",\"utm_campaign\":\"none\"}';
EOF"


@@ -1,23 +1,34 @@
 #!/bin/bash
-template templates/hosthostname /etc/hostname
+template templates/hostname /etc/hostname
+rex doas apk update
+rex doas apk add bash doas rsync openssh linux-lts
+
+### activate lts kerner
+template templates/extlinux.conf /boot/extlinux.conf
+rex doas chown root:root /boot/extlinux.conf
+rex doas chmod 644 /boot/extlinux.conf

 # ass swap file ????

-# ------ create user 4server
-# ------ disable root user and login
+rex doas mkdir -p /4server
+rex doas chmod 777 /4server

 # ----- install nabula
-prsync -h "$hosts_file" -avz ./templates/nebula/nebula /4server/nebula
+echo "prsync nebula bin"
+prsync -h "/app/host_vars/hosts" -avz ./sbin/nebula /4server/nebula
 rex doas mv /4server/nebula /usr/bin/
 rex doas mkdir -p /etc/nebula
 rex doas chmod 700 /etc/nebula
-template templates/nebula/nebula.yml /etc/nebula/config.yml
+template templates/nebula/config.yml /etc/nebula/config.yml
 template templates/nebula/host.key /etc/nebula/host.key
 template templates/nebula/host.crt /etc/nebula/host.crt
+template templates/nebula/ca.crt /etc/nebula/ca.crt
 rex doas chmod 700 /etc/nebula

@@ -25,7 +36,7 @@ template templates/init.d/nebula /etc/init.d/nebula
 rex doas chmod 0755 /etc/init.d/nebula
 rex doas chown root:root /etc/init.d/nebula
 rex doas rc-update add nebula default
-rex doas rc-service nebula restart
+### nebula restart must be last command

 template templates/init.d/ping_service /etc/init.d/ping_service
 rex doas chmod 0755 /etc/init.d/ping_service

@@ -34,6 +45,30 @@ rex doas rc-update add ping_service default
 rex doas rc-service ping_service restart

-#! ----------- install ufe
-- ssh, 8080 only on nebula
-- only 80, 443 to the world
+# ADD USER 4SERVER
+rex doas adduser -D -s /bin/bash 4server
+
+SSH_DIR="/home/4server/.ssh"
+rex doas mkdir -p "$SSH_DIR"
+rex doas chmod 700 "$SSH_DIR"
+rex doas chown 4server:4server "$SSH_DIR"
+template templates/ssh/id_ed25519.pub /home/4server/.ssh/authorized_keys
+rex doas chmod 755 /home/4server
+rex doas chmod 700 /home/4server/.ssh
+rex doas chmod 600 /home/4server/.ssh/authorized_keys
+rex doas chown 4server:4server /home/4server/.ssh/authorized_keys
+rex doas usermod -p Ne82Vrx8QfUdNHvLgct 4server
+rex doas passwd -u 4server
+
+template templates/.profile /home/4server/.profile
+template templates/etc/doas.d/4server.conf /etc/doas.d/4server.conf
+rex doas mkdir -p /etc/doas.d
+
+rex doas rc-service sshd restart
+rex doas rc-service nebula restart
+rex doas reboot


@@ -0,0 +1,92 @@
#!/bin/bash
dump_config (){
echo "========== Odoo Container Configuration =========="
echo "UUID: $UUID"
echo "BRANCH: $BRANCH"
echo "STAGING: $STAGING"
echo
echo "PostgreSQL Host: $POSTGRES_HOST"
echo "PostgreSQL Port: $POSTGRES_PORT"
echo "PostgreSQL Admin: $POSTGRES_ADMIN_USER / $POSTGRES_ADMIN_PASSWORD"
echo "ODOO DB User: $ODOO_DB_USER"
echo "ODOO DB Password: $ODOO_DB_PASSWORD"
echo
echo "BASEURL: $BASEURL"
echo "DATA_DIR: $DATA_DIR"
echo "CUSTOM_DIR: $CUSTOM_DIR"
echo "ENTERPRISE_DIR: $ENTERPRISE_DIR"
echo "LOGS_DIR: $LOGS_DIR"
echo "CONFIG_DIR: $CONFIG_DIR"
echo "CC_DIR: $CC_DIR"
echo "BACKUP_DIR: $BACKUP_DIR"
echo "GIT_DIR: $GIT_DIR"
echo "ETC_DIR: $ETC_DIR"
echo "INSTALL_DIR: $INSTALL_DIR"
echo "SSH_DIR: $SSH_DIR"
echo "HUGO_DIR: $HUGO_DIR"
echo
echo "SERVER_IP: $SERVER_IP"
echo "=================================================="
}
# -----------------------------
# Function: Create PostgreSQL user
# -----------------------------
check_and_create_db() {
echo "check and create"
echo "Connecting as $POSTGRES_ADMIN_USER to $POSTGRES_HOST:$POSTGRES_PORT"
# -----------------------------
# Check if user exists
# -----------------------------
USER_EXISTS=$(PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql -h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='$ODOO_DB_USER';" | grep -q 1 && echo "yes" || echo "no")
if [ "$USER_EXISTS" = "no" ]; then
echo "Creating PostgreSQL user $ODOO_DB_USER..."
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql -h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres -c "CREATE USER \"$ODOO_DB_USER\" WITH PASSWORD '$ODOO_DB_PASSWORD';"
fi
# -----------------------------
# Check if database exists
# -----------------------------
DB_EXISTS=$(PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql -h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres -tAc "SELECT 1 FROM pg_database WHERE datname='$UUID';" | grep -q 1 && echo "yes" || echo "no")
if [ "$DB_EXISTS" = "no" ]; then
/4server/sbin/ODOO_19/restore $UUID default.zip
fi
}
# -----------------------------
# Function: Check DNS and build Traefik labels
# -----------------------------
check_domains() {
local domains="$1"
local server_ip="$2"
echo "Checking DNS resolution for domains: $domains"
local filtered_domains=""
for domain in $domains; do
ns_ip=$(nslookup "$domain" 2>/dev/null | grep -Eo 'Address: ([0-9]{1,3}\.){3}[0-9]{1,3}' | awk '{print $2}' | tail -n1)
if [[ "$ns_ip" == "$server_ip" ]]; then
filtered_domains+=" $domain"
fi
done
filtered_domains=$(echo "$filtered_domains" | xargs)
DOMAIN_LABEL=""
for domain in $filtered_domains; do
if [ -z "$DOMAIN_LABEL" ]; then
DOMAIN_LABEL="traefik.http.routers.$UUID.rule=Host(\`$domain\`)"
else
DOMAIN_LABEL+=" || Host(\`$domain\`)"
fi
done
echo "$DOMAIN_LABEL"
}
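A minimal sketch of how `check_domains` could be called from a start script, based on the signature above (space-separated domain list plus the server IP) and on how start/ODOO_17 later in this diff derives `SERVER_IP`; the UUID and the extra domain are placeholders.

source /4server/sbin/ODOO_19/ODOO_19.lib
UUID="001-003-123e4567-e89b-12d3-a456-426614174000"                      # placeholder container UUID
SERVER_IP=$(ip -4 addr show eth0 | awk '/inet/ {print $2}' | cut -d/ -f1 | head -n1)
# Only domains whose A record already points at this server make it into the router rule.
# Note that the function also echoes a progress line, so a caller that wants just the label
# would need to filter that out (or the library could return it via a variable instead).
check_domains "$UUID.odoo4projects.com shop.example.com" "$SERVER_IP"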

47
app/sbin/ODOO_19/dbVersion Executable file

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
import csv
import sys
import zipfile
if len(sys.argv) < 2:
print("Usage: python3 check_odoo_version.py <dump.zip>")
sys.exit(1)
zip_path = sys.argv[1]
base_version = None
with zipfile.ZipFile(zip_path, 'r') as z:
# Assume there is only one .sql file in the zip
sql_files = [f for f in z.namelist() if f.endswith('.sql')]
if not sql_files:
print("No .sql file found in the zip.")
sys.exit(1)
sql_file_name = sql_files[0]
with z.open(sql_file_name, 'r') as f:
# Decode bytes to string
lines = (line.decode('utf-8') for line in f)
# Skip lines until COPY command
for line in lines:
if line.startswith("COPY public.ir_module_module"):
break
# Read the COPY data until the terminator '\.'
reader = csv.reader(lines, delimiter='\t', quotechar='"', escapechar='\\')
for row in reader:
if row == ['\\.']: # End of COPY
break
if len(row) < 12:
continue
module_name = row[7].strip() # 8th column = name
if module_name == "base":
base_version = row[11].strip() # 12th column = latest_version
break
if base_version:
print(base_version.split(".")[0])
else:
print("Base module not found in dump.")

32
app/sbin/ODOO_19/import Normal file

@@ -0,0 +1,32 @@
#!/bin/bash
# Create the tmp directory if it doesn't exist
mkdir -p /4server/tmp/
# Save original stdout
exec 3>&1
# Redirect all other output to log
exec > /4server/data/log/importDb.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') Import file $1"
# Generate random 8-digit filename
RANDOM_FILE="/4server/tmp/$(printf "%08d" $((RANDOM % 100000000))).zip"
# Download file from Google Drive using gdown
gdown "$1" -O "$RANDOM_FILE"
# Execute dbVersion on the downloaded file and capture output
VERSION=$(/4server/sbin/ODOO_19/dbVersion "$RANDOM_FILE")
# Output JSON to original stdout
cat <<EOF >&3
{
"version":"$VERSION",
"file":"$RANDOM_FILE"
}
EOF
# Close saved stdout
exec 3>&-
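Because everything except the final JSON is redirected to the log, a caller can consume the output of `import` directly; a sketch with `jq`, using a placeholder Google Drive id:

result=$(/4server/sbin/ODOO_19/import 1AbCdEfGhIjKlMnOpQrStUvWxYz)   # placeholder Drive id
version=$(echo "$result" | jq -r '.version')
file=$(echo "$result" | jq -r '.file')
echo "downloaded Odoo $version dump to $file"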

68
app/sbin/ODOO_19/restore Executable file

@@ -0,0 +1,68 @@
#!/bin/bash
export PATH=/4PROJECTS/bin:$PATH
if [ ! -n "$2" ]; then
echo "Missing Parameters <UUID> <FILE>"
exit 0
fi
UUID=$1
echo "UUID: $UUID"
source /4server/sbin/helpers
get_contract_info
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
echo "PASSWORD $ODOO_DB_PASSWORD"
echo "Restoring $FILENAME to $UUID"
echo "status of container"
doas docker ps -a --filter "id=$UUID"
echo "POSTGRES HOST: $POSTGRES_HOST"
BACKUP="/mnt/backup/$2"
TEMPLATE="/mnt/db_images/$2"
doas docker exec "$UUID" /bin/bash -c "[ -f $TEMPLATE ]"
if doas docker exec "$UUID" /bin/bash -c "[ -f $BACKUP ]"; then
FILENAME="$BACKUP"
elif doas docker exec "$UUID" /bin/bash -c "[ -f $TEMPLATE ]"; then
FILENAME="$TEMPLATE"
else
echo "File not exists"
exit 0
fi
### DELETE AND CREATE DATABASE
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql -h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -d postgres -c "
SELECT pg_terminate_backend(pid) FROM pg_stat_activity
WHERE datname = '$UUID' AND pid <> pg_backend_pid();
"
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql -h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -d postgres -c "
DROP DATABASE IF EXISTS \"$UUID\";
"
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql \
-h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres \
-c "ALTER ROLE \"$UUID\" CREATEDB;"
doas docker exec "$UUID" rm -rf /home/odoo/.local/share/Odoo/filestore
doas docker exec "$UUID" rm -rf /root/.local/share/Odoo/filestore
doas docker exec "$UUID" odoo db --db_host beedb -w "$ODOO_DB_PASSWORD" -r "$UUID" load "$UUID" $FILENAME -f
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql \
-h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres \
-c "ALTER ROLE \"$UUID\" NOCREATEDB;"
doas docker exec "$UUID" cp -r /root/.local/share/Odoo/filestore /home/odoo/.local/share/Odoo/filestore
doas docker exec "$UUID" chown -R odoo:odoo /home/odoo/.local
doas docker exec "$UUID" mkdir -p /var/lib/odoo/.local/share/Odoo/
doas docker exec "$UUID" ln -s /home/odoo/.local/share/Odoo/filestore /var/lib/odoo/.local/share/Odoo/filestore
docker restart "$UUID"

23
app/sbin/ODOO_19/shell Executable file

@@ -0,0 +1,23 @@
#!/bin/bash
export PATH=/4PROJECTS/bin:$PATH
if [ ! -n "$1" ]; then
echo "Missing Parameters <UUID>"
exit 0
fi
UUID=$1
echo "UUID: $UUID"
source /4server/sbin/helpers
get_contract_info
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
echo "PASSWORD $ODOO_DB_PASSWORD"
echo "POSTGRES HOST: $POSTGRES_HOST"
doas docker exec -it "$UUID" odoo shell --db_host beedb --db_password="$ODOO_DB_PASSWORD" -d "$UUID" --db_user="$UUID"


@@ -1,23 +1,32 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
from fastapi import FastAPI, HTTPException, Depends, Response from fastapi import FastAPI, HTTPException, Depends, Response
from fastapi.security.api_key import APIKeyHeader from fastapi.security.api_key import APIKeyHeader
from fastapi.responses import RedirectResponse from fastapi.responses import RedirectResponse, PlainTextResponse
from pydantic import BaseModel from pydantic import BaseModel
import psutil import psutil
import sqlite3 import sqlite3
import subprocess import subprocess
import os import os
import uvicorn import uvicorn
from typing import Optional from typing import Dict, Any, Optional
from datetime import datetime from datetime import datetime
import json
import re
from collections import deque
import time
from pathlib import Path
from pathlib import Path
# Constants
# ---------------------- Constants ----------------------
DB_PATH = "/4server/data/contracts.db" DB_PATH = "/4server/data/contracts.db"
BIN_PATH = "/4server/sbin" BIN_PATH = "/4server/sbin"
API_KEY = os.getenv("API_KEY", "your-secret-api-key") API_KEY = os.getenv("API_KEY", "your-secret-api-key")
VERSION = "API: 0.0.5" VERSION = "API: 0.0.8"
# FastAPI app # ---------------------- FastAPI App ----------------------
app = FastAPI() app = FastAPI()
api_key_header = APIKeyHeader(name="X-API-Key") api_key_header = APIKeyHeader(name="X-API-Key")
@@ -32,7 +41,14 @@ def run_command(cmd: list[str]) -> str:
"""Run a shell command and return stdout or raise HTTPException on error.""" """Run a shell command and return stdout or raise HTTPException on error."""
result = subprocess.run(cmd, capture_output=True, text=True) result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode != 0: if result.returncode != 0:
raise HTTPException(status_code=500, detail=result.stderr.strip() or "Unknown error") error_msg = (
f"Command failed\n"
f"Command: {' '.join(cmd)}\n"
f"Return code: {result.returncode}\n"
f"Stdout: {result.stdout.strip()}\n"
f"Stderr: {result.stderr.strip() or 'None'}"
)
raise HTTPException(status_code=500, detail=error_msg)
return result.stdout.strip() return result.stdout.strip()
@@ -44,7 +60,7 @@ def init_db():
cursor.execute(''' cursor.execute('''
CREATE TABLE IF NOT EXISTS containers ( CREATE TABLE IF NOT EXISTS containers (
ID INTEGER PRIMARY KEY AUTOINCREMENT, ID INTEGER PRIMARY KEY AUTOINCREMENT,
UUID CHAR(50), UUID CHAR(50) UNIQUE,
email CHAR(100), email CHAR(100),
expires DATE, expires DATE,
tags TEXT, tags TEXT,
@@ -56,7 +72,9 @@ def init_db():
domains TEXT, domains TEXT,
status CHAR(20), status CHAR(20),
created DATE, created DATE,
bump DATE bump DATE,
secret TEXT,
contract TEXT
) )
''') ''')
conn.commit() conn.commit()
@@ -65,41 +83,67 @@ def init_db():
def execute_db(query: str, params: tuple = (), fetch: bool = False): def execute_db(query: str, params: tuple = (), fetch: bool = False):
conn = sqlite3.connect(DB_PATH) conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
cursor = conn.cursor() cursor = conn.cursor()
cursor.execute(query, params) cursor.execute(query, params)
conn.commit() conn.commit()
data = cursor.fetchall() if fetch else None data = cursor.fetchall() if fetch else None
conn.close() conn.close()
return data if data and fetch:
return [dict(row) for row in data]
return None
# ---------------------- Models ---------------------- # ---------------------- Models ----------------------
class ContractItem(BaseModel):
quantity: int
name: str
product_id: int
features: Dict[str, Any]
class ContainerModel(BaseModel): class ContainerModel(BaseModel):
UUID: str UUID: str
email: str email: Optional[str] = None
expires: str expires: Optional[str] = None
tags: Optional[str] = None tags: Optional[str] = None
env: Optional[str] = None env: Optional[Dict[str, Any]] = None
affiliate: Optional[str] = None affiliate: Optional[str] = None
image: Optional[str] = None image: Optional[str] = None
history: Optional[str] = None history: Optional[str] = None
comment: Optional[str] = None comment: Optional[str] = None
domains: Optional[str] = None domains: Optional[str] = None
status: str status: Optional[str] = None
created: str created: Optional[str] = None
bump: Optional[str] = None bump: Optional[str] = None
secret: Optional[Dict[str, Any]] = None
contract: Optional[str] = None
class UUIDRequest(BaseModel):
UUID: str
class CommandRequest(BaseModel):
uuid: str
method: int
class ImportRequest(BaseModel):
filename: str
class MoveRequest(BaseModel):
source: str
destination: str
class ContainerIDRequest(BaseModel): # ---------------------- Routes ----------------------
container_id: str @app.get("/", include_in_schema=False)
def redirect_to_odoo():
return RedirectResponse(url="https://ODOO4PROJECTS.com")
from fastapi import FastAPI, Depends
from fastapi.responses import RedirectResponse
from pydantic import BaseModel
from typing import Optional, Dict, Any
import json
class UpdateContainerRequest(ContainerModel): app = FastAPI()
pass
class InfoContainerRequest(BaseModel):
container_id: Optional[str] = None
# ---------------------- Routes ---------------------- # ---------------------- Routes ----------------------
@@ -109,70 +153,122 @@ def redirect_to_odoo():
@app.post("/container/update", dependencies=[Depends(verify_api_key)]) @app.post("/container/update", dependencies=[Depends(verify_api_key)])
def update_container(request: UpdateContainerRequest): def update_container(request: ContainerModel):
# Convert dict fields to JSON strings
env_str = json.dumps(request.env) if isinstance(request.env, dict) else None
secret_str = json.dumps(request.secret) if isinstance(request.secret, dict) else None
contract_str = json.dumps(request.contract) if isinstance(request.contract, dict) else None
# Fetch existing record
existing = execute_db("SELECT * FROM containers WHERE UUID = ?", (request.UUID,), fetch=True) existing = execute_db("SELECT * FROM containers WHERE UUID = ?", (request.UUID,), fetch=True)
if existing: if not existing:
execute_db(""" # If record does not exist, insert a new one with all given fields
UPDATE containers SET email=?, expires=?, tags=?, env=?, affiliate=?, image=?,
history=?, comment=?, domains=?, status=?, created=?, bump=?
WHERE UUID=?
""", (
request.email, request.expires, request.tags, request.env, request.affiliate,
request.image, request.history, request.comment, request.domains, request.status,
request.created, request.bump, request.UUID
))
else:
execute_db(""" execute_db("""
INSERT INTO containers (UUID, email, expires, tags, env, affiliate, image, history, INSERT INTO containers (UUID, email, expires, tags, env, affiliate, image, history,
comment, domains, status, created, bump) comment, domains, status, created, bump, secret, contract)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", ( """, (
request.UUID, request.email, request.expires, request.tags, request.env, request.UUID, request.email, request.expires, request.tags, env_str,
request.affiliate, request.image, request.history, request.comment, request.affiliate, request.image, request.history, request.comment,
request.domains, request.status, request.created, request.bump request.domains, request.status, request.created, request.bump, secret_str, contract_str
)) ))
return {"message": "Container updated or created"} return {"UUID": request.UUID, "status": "created"}
# Existing record found, do partial update
existing = existing[0] # Assuming fetch returns list of dicts
updates = {}
params = []
# Only add fields that are not None in the request
for field in ContainerModel.__fields__:
if field == "UUID":
continue
value = getattr(request, field)
if value is not None:
if field in ["env", "secret", "contract"]:
value = json.dumps(value)
updates[field] = value
params.append(value)
if updates:
# Build SQL dynamically
set_clause = ", ".join(f"{k}=?" for k in updates.keys())
params.append(request.UUID) # UUID for WHERE clause
execute_db(f"UPDATE containers SET {set_clause} WHERE UUID=?", tuple(params))
return {"UUID": request.UUID, "status": "updated", "fields_updated": list(updates.keys())}
return {"UUID": request.UUID, "status": "no_change"}
@app.post("/container/start", dependencies=[Depends(verify_api_key)]) @app.post("/container/start", dependencies=[Depends(verify_api_key)])
def start_container(request: ContainerIDRequest): def start_container(request: UUIDRequest):
return {"message": run_command([f"{BIN_PATH}/startContainer", request.container_id])} return {"message": run_command([f"{BIN_PATH}/startContainer", request.UUID])}
@app.post("/container/stop", dependencies=[Depends(verify_api_key)]) @app.post("/container/stop", dependencies=[Depends(verify_api_key)])
def stop_container(request: ContainerIDRequest): def stop_container(request: UUIDRequest):
return {"message": run_command([f"{BIN_PATH}/stopContainer", request.container_id])} return {"message": run_command([f"{BIN_PATH}/stopContainer", request.UUID])}
@app.post("/container/nuke", dependencies=[Depends(verify_api_key)]) @app.post("/container/nuke", dependencies=[Depends(verify_api_key)])
def nuke_container(request: ContainerIDRequest): def nuke_container(request: UUIDRequest):
status = execute_db("SELECT status FROM containers WHERE UUID=?", (request.container_id,), fetch=True) status = execute_db("SELECT status FROM containers WHERE UUID=?", (request.UUID,), fetch=True)
if not status or status[0][0] != "nuke": if not status or status[0]["status"] != "nuke":
raise HTTPException(400, "Container status is not 'nuke'") raise HTTPException(400, "Container status is not 'nuke'")
return {"message": run_command([f"{BIN_PATH}/nukeContainer", request.container_id])} return {"message": run_command([f"{BIN_PATH}/nukeContainer", request.UUID])}
@app.post("/container/info", dependencies=[Depends(verify_api_key)]) @app.post("/container/info", dependencies=[Depends(verify_api_key)])
def info_container(request: InfoContainerRequest): def info_container(request: Optional[UUIDRequest] = None):
if request.container_id: # Fields to select
rows = execute_db("SELECT * FROM containers WHERE UUID=?", (request.container_id,), fetch=True) fields = [
else: "ID", "UUID", "email", "expires", "tags", "env", "affiliate",
rows = execute_db("SELECT * FROM containers", fetch=True) "image", "history", "comment", "domains", "status", "created", "contract"
return {"containers": rows} ]
field_str = ", ".join(fields)
# Execute query
if request:
rows = execute_db(
f"SELECT {field_str} FROM containers WHERE UUID=?",
(str(request.UUID),),
fetch=True
)
else:
rows = execute_db(
f"SELECT {field_str} FROM containers",
fetch=True
)
# Map rows to dicts safely
containers = []
for row in rows:
if isinstance(row, dict):
# Already a dict (e.g., some DB wrappers)
containers.append(row)
else:
# Tuple/list -> map with fields
containers.append(dict(zip(fields, row)))
# Wrap in n8n JSON format
n8n_items = [{"json": container} for container in containers]
return n8n_items
@app.post("/container/bump", dependencies=[Depends(verify_api_key)]) @app.post("/container/bump", dependencies=[Depends(verify_api_key)])
def bump_container(request: ContainerIDRequest): def bump_container(request: UUIDRequest):
today = datetime.utcnow().strftime("%Y-%m-%d") today = datetime.utcnow().strftime("%Y-%m-%d")
execute_db("UPDATE containers SET bump=? WHERE UUID=?", (today, request.container_id)) execute_db("UPDATE containers SET bump=? WHERE UUID=?", (today, request.UUID))
msg = run_command([f"{BIN_PATH}/bumpContainer", request.container_id]) msg = run_command([f"{BIN_PATH}/bumpContainer", request.UUID])
return {"message": msg, "bump_date": today} return {"message": msg, "bump_date": today}
@app.post("/container/quota", dependencies=[Depends(verify_api_key)]) @app.post("/container/quota", dependencies=[Depends(verify_api_key)])
def container_quota(request: ContainerIDRequest): def container_quota(request: UUIDRequest):
output = run_command([ output = run_command([
"docker", "stats", request.container_id, "--no-stream", "docker", "stats", request.UUID, "--no-stream",
"--format", "{{.MemUsage}},{{.BlockIO}}" "--format", "{{.MemUsage}},{{.BlockIO}}"
]) ])
mem_usage, disk_usage = output.split(",") mem_usage, disk_usage = output.split(",")
@@ -190,13 +286,25 @@ def list_images():
images = run_command(["docker", "images", "--format", "{{.Repository}}:{{.Tag}}"]) images = run_command(["docker", "images", "--format", "{{.Repository}}:{{.Tag}}"])
return {"images": images.split("\n")} return {"images": images.split("\n")}
@app.get("/system/cpu", dependencies=[Depends(verify_api_key)])
def get_cpu_log():
CPU_LOG_PATH = Path("/4server/data/log/cpu.log")
if not CPU_LOG_PATH.exists():
raise HTTPException(status_code=404, detail="CPU log file not found")
try:
with CPU_LOG_PATH.open("r") as f:
content = f.read()
return PlainTextResponse(content)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error reading CPU log: {e}")
@app.get("/system/info", dependencies=[Depends(verify_api_key)]) @app.get("/system/info", dependencies=[Depends(verify_api_key)])
def get_system_info(): def get_system_info():
try: try:
alpine_version = None alpine_version = None
last_update = None last_update = None
bump_dates = execute_db("SELECT MAX(bump) FROM containers", fetch=True)[0][0] bump_dates = execute_db("SELECT MAX(bump) AS latest_bump FROM containers", fetch=True)[0]["latest_bump"]
if os.path.exists("/4server/data/update"): if os.path.exists("/4server/data/update"):
with open("/4server/data/update") as f: with open("/4server/data/update") as f:
last_update = f.read().strip() last_update = f.read().strip()
@@ -226,9 +334,107 @@ def pull_all_images():
return {"message": run_command([f"{BIN_PATH}/pullAllContainers"])} return {"message": run_command([f"{BIN_PATH}/pullAllContainers"])}
@app.post("/client/git", dependencies=[Depends(verify_api_key)])
def git_tool(request: CommandRequest):
if request.method == 1:
command = [f"{BIN_PATH}/gitPull", request.uuid]
elif request.method == 2:
command = [f"{BIN_PATH}/gitRevert", request.uuid]
else:
raise HTTPException(status_code=400, detail="Invalid method")
output = run_command(command)
return {"message": output}
@app.get("/client/logs/{uuid}", dependencies=[Depends(verify_api_key)])
async def get_odoo_log_summary(uuid: str):
if not re.fullmatch(r"[0-9a-fA-F\-]+", uuid):
raise HTTPException(status_code=400, detail="Invalid UUID format. Only numbers, letters a-f, and '-' are allowed.")
BASE_LOG_DIR = "/4server/data"
# Build file paths as strings
project_dir = os.path.join(BASE_LOG_DIR, uuid)
odoo_log_file = os.path.join(project_dir, "logs", "odoo.log")
git_log_file = os.path.join(project_dir, "logs", "git.log")
if not os.path.isfile(odoo_log_file):
raise HTTPException(status_code=404, detail="Odoo log file not found")
# --- Helper variables and functions ---
IMPORTANT_PATTERNS = [
re.compile(r"odoo\.addons\."), # Any Odoo addon log
re.compile(r"Job '.*' starting"), # Cron job start
re.compile(r"Job '.*' fully done"), # Cron job end
re.compile(r"ERROR"), # Errors
re.compile(r"WARNING"), # Warnings
re.compile(r"Traceback"), # Tracebacks
re.compile(r"Error"),
]
def is_important_line(line: str) -> bool:
return any(p.search(line) for p in IMPORTANT_PATTERNS)
def read_last_lines(file_path: str, max_lines: int) -> list[str]:
"""
Read the last `max_lines` from the file efficiently.
"""
if not os.path.isfile(file_path):
return []
last_lines = deque(maxlen=max_lines)
with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
for line in f:
last_lines.append(line.strip())
return list(last_lines)
# --- Main logic ---
try:
# Last 500 lines from Odoo log
last_500_lines = read_last_lines(odoo_log_file, 500)
important_odoo_lines = [line for line in last_500_lines if is_important_line(line)]
# Last 50 lines from git.log
last_50_git_lines = read_last_lines(git_log_file, 50)
return {
"uuid": str(uuid),
"important_odoo_log_lines": important_odoo_lines,
"last_git_log_lines": last_50_git_lines,
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error reading log files: {e}")
# ------------------------ BACKUP HANDLING -------------------------------------
@app.post("/backup/import", dependencies=[Depends(verify_api_key)])
def backup_import(request: ImportRequest):
if not request.filename:
raise HTTPException(status_code=400, detail="Filename is required")
command = [f"{BIN_PATH}/ODOO_19/import", request.filename]
output = run_command(command)
return {"message": output}
@app.post("/backup/move", dependencies=[Depends(verify_api_key)])
def backup_move(request: MoveRequest):
if not request.source or not request.destination:
raise HTTPException(status_code=400, detail="Source and destination are required")
if not os.path.exists(request.source):
raise HTTPException(status_code=404, detail="Source file does not exist")
# Use shell command to move the file
command = ["mv", request.source, request.destination] # Linux/macOS
# For Windows, use: command = ["move", request.source, request.destination]
output = run_command(command)
return {"message": f"Moved {request.source} to {request.destination}", "output": output}
# ---------------------- Entry Point ---------------------- # ---------------------- Entry Point ----------------------
if __name__ == "__main__": if __name__ == "__main__":
print(VERSION) print(VERSION)
init_db() init_db()
time.sleep(25)
uvicorn.run(app, host="10.5.0.1", port=8888) uvicorn.run(app, host="10.5.0.1", port=8888)
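For reference, a hedged example of calling the service from another host on the overlay network; the bind address and port come from the `uvicorn.run` line above, the header name from `APIKeyHeader`, and the key value and UUID are placeholders.

curl -s -X POST http://10.5.0.1:8888/container/info \
  -H "X-API-Key: your-secret-api-key" \
  -H "Content-Type: application/json" \
  -d '{"UUID": "001-003-123e4567-e89b-12d3-a456-426614174000"}' | jq .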

48
app/sbin/backup/ODOO_19 Executable file

@@ -0,0 +1,48 @@
#!/bin/bash
# Backup Odoo database script
# Author: Your Name
# Description: Dumps Odoo DB, manages backups, and sets permissions
set -euo pipefail # Fail on error, undefined variables, and pipe errors
# Load helper functions
source /4server/sbin/helpers
# Get contract info
get_contract_info
# Export Odoo database password
export ODOO_DB_PASSWORD
ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
# Display basic info
echo "UUID: $UUID"
echo "Backup slots: $BACKUP_SLOTS"
# Create backup filename
FILENAME="$(date +"%Y%m%d_%H%M").zip"
BACKUP_DIR="/BACKUP/$UUID"
# Ensure backup directory exists
mkdir -p "$BACKUP_DIR"
# Perform database dump using docker
doas docker exec "$UUID" odoo db \
--db_host beedb \
-r "$UUID" \
-w "$ODOO_DB_PASSWORD" \
--data-dir /home/odoo/.local/share/Odoo/ \
dump "$UUID" "/mnt/backup/$FILENAME"
# Set permissions for backup files
doas chmod 600 "$BACKUP_DIR"/*
doas docker exec "$UUID" chown odoo:odoo -R /mnt/backup
# Remove old backups beyond the configured slots
ls -t "$BACKUP_DIR"/[0-9]*.zip 2>/dev/null | tail -n +$((BACKUP_SLOTS + 1)) | while read -r file; do
echo "Deleting old backup: $file"
doas rm -f "$file"
done

36
app/sbin/backupContainer Executable file

@@ -0,0 +1,36 @@
#!/bin/bash
# Usage: ./start_by_uuid.sh <uuid>
# Example: ./start_by_uuid.sh abc-001-xxxx-xxxx-xxxx
exec > /4server/data/log/backupContainer.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') Backup container $1"
source /4server/sbin/helpers
BIN_PATH="/4server/sbin"
UUID="$1"
if [[ -z "$UUID" ]]; then
echo "Usage: $0 <uuid>"
exit 1
fi
get_contract_info
# Extract the second part of UUID (split by "-")
SECOND_PART=$(echo "$UUID" | cut -d'-' -f2)
# Decide which script to run
case "$SECOND_PART" in
001)
"$BIN_PATH/backup/n8n"
;;
003)
"$BIN_PATH/backup/ODOO_19"
;;
*)
echo "Unknown UUID type: $SECOND_PART"
exit 2
;;
esac

34
app/sbin/checkCalls Executable file

@@ -0,0 +1,34 @@
#!/bin/bash
cd /4server/data/
while :
do
for dir in ???-???-*; do
if [ -d "${dir}/cc" ]; then
if [ -f "${dir}/cc/backup" ]; then
echo "BACKUP for: ${dir%/}"
/4server/sbin/backupContainer ${dir%/} 2
rm "${dir}/cc/backup"
fi
if [ -f "${dir}/cc/restart" ]; then
echo "Restart for: ${dir%/}"
/4server/sbin/startContainer ${dir%/}
rm "${dir}/cc/restart"
fi
if [ -f "${dir}/cc/restore" ]; then
FILENAME=$(head -n 1 "${dir}/cc/restore")
echo "Restore for: ${dir%/} - $FILENAME"
/4server/sbin/ODOO_19/restore ${dir%/} $FILENAME
rm "${dir}/cc/restore"
fi
fi
done
sleep 60
done
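This polling loop is what turns a dropped marker file into an action, so other components only need write access to a container's `cc/` directory. An illustration of the three triggers, with a placeholder UUID and backup name:

uuid="001-003-123e4567-e89b-12d3-a456-426614174000"           # placeholder
touch /4server/data/$uuid/cc/backup                           # picked up as a backup request
touch /4server/data/$uuid/cc/restart                          # picked up as a restart request
echo "20251001_0200.zip" > /4server/data/$uuid/cc/restore     # restore this file on the next pass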

8
app/sbin/cleanTmp Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
while true; do
find /4server/tmp -type f -atime +2 -delete
find /4server/tmp -type d -empty -delete
sleep 1d
done

14
app/sbin/contractInfo Executable file

@@ -0,0 +1,14 @@
export PATH=/4PROJECTS/bin:$PATH
if [ ! -n "$1" ]; then
echo "Missing Parameters <UUID>"
exit 0
fi
UUID=$1
echo "UUID: $UUID"
source /4server/sbin/helpers
get_contract_info
env

39
app/sbin/cpu Executable file

@@ -0,0 +1,39 @@
#!/bin/sh
# Log file
OUTPUT="/4server/data/log/cpu_idle.log"
# Sampling interval in seconds
INTERVAL=60
while true; do
DATA="" # buffer to store idle samples
# Current date for the measurement period
DATE=$(date "+%Y-%m-%d")
echo "Starting measurement for $DATE"
while [ "$(date +%H:%M)" != "23:45" ]; do
# Get idle CPU percentage
IDLE=$(mpstat 1 1 | awk '/Average/ {print $12}')
# Append to buffer
DATA="$DATA$IDLE\n"
sleep $INTERVAL
done
# Write all data to log file with date
# Only one line per day: Date + space-separated idle samples
echo -n "$DATE " >> "$OUTPUT"
echo -e "$DATA" | tr '\n' ' ' >> "$OUTPUT"
echo >> "$OUTPUT" # newline at the end
echo "Measurement for $DATE written to $OUTPUT"
# Wait until 00:15 to start next day
while [ "$(date +%H:%M)" != "00:15" ]; do
sleep 30
done
done

4
app/sbin/gitPull Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
docker exec $1 /gitPull

4
app/sbin/gitRevert Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
docker exec $1 /gitRollBack

41
app/sbin/helpers Executable file

@@ -0,0 +1,41 @@
#!/bin/bash
POSTGRES_HOST="${POSTGRES_HOST:-beedb}"
POSTGRES_PORT="${POSTGRES_PORT:-5432}"
POSTGRES_ADMIN_USER="${POSTGRES_ADMIN_USER:-1gtT0sf8klB9lDbYZD9}"
POSTGRES_ADMIN_PASSWORD="${POSTGRES_ADMIN_PASSWORD:-ZpSwWNafyy9GhY2gzHw}"
get_contract_info() {
DB_PATH="/4server/data/contracts.db"
echo "get_contract_info $UUID"
while IFS="=" read -r key value; do
if [ -n "$key" ]; then
export "$key=$value"
fi
done < <(sqlite3 "$DB_PATH" "
SELECT 'UUID=' || UUID FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'EMAIL=' || email FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'EXPIRES=' || expires FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'TAGS=' || tags FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'ENV=' || env FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'AFFILIATE=' || affiliate FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'IMAGE=' || image FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'HISTORY=' || history FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'COMMENT=' || comment FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'DOMAINS=' || domains FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'STATUS=' || status FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'CREATED=' || created FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'SECRET=' || secret FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'CONTAINERDBID=' || id FROM containers WHERE UUID='$UUID'
UNION ALL SELECT 'BUMP=' || bump FROM containers WHERE UUID='$UUID';
")
eval $(echo "$ENV" | jq -r 'to_entries | .[] | "export \(.key | ascii_upcase)=\(.value)"')
}
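Most scripts in this diff follow the same calling convention around this helper: set `UUID`, source the file, run `get_contract_info`, then read the exported variables. A condensed sketch of that contract with placeholder values:

UUID="001-003-123e4567-e89b-12d3-a456-426614174000"   # placeholder
source /4server/sbin/helpers
get_contract_info                                     # exports EMAIL, IMAGE, STATUS, SECRET, ... from contracts.db
ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')    # per-container DB password kept in the secret JSON
echo "container $UUID uses image $IMAGE (status: $STATUS)"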

BIN
app/sbin/nebula-cert Executable file

Binary file not shown.

56
app/sbin/nuke/ODOO_19 Executable file

@@ -0,0 +1,56 @@
#!/bin/bash
# Load functions
source /4server/sbin/ODOO_19/ODOO_19.lib
if [[ -z "$UUID" ]]; then
echo "Error: UUID not set. Aborting."
exit 1
fi
POSTGRES_HOST="${POSTGRES_HOST:-beedb}"
POSTGRES_PORT="${POSTGRES_PORT:-5432}"
POSTGRES_ADMIN_USER="${POSTGRES_ADMIN_USER:-1gtT0sf8klB9lDbYZD9}"
POSTGRES_ADMIN_PASSWORD="${POSTGRES_ADMIN_PASSWORD:-ZpSwWNafyy9GhY2gzHw}"
ODOO_DB_USER="${UUID}"
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
BASEURL="${BASEURL:-/4server/data/$UUID}"
BACKUPURL="/4backup/$UUID"
doas docker stop "$UUID"
doas docker rm "$UUID"
if [ -n "${UUID:-}" ]; then
echo "Removing directory: $BASEURL"
#doas rm -rf "$BASEURL"
echo "Removing backup directory $BACKUPURL"
#doas rm -rf $BACKUPURL
fi
echo "Dropping PostgreSQL database: $UUID (if exists)..."
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql \
-h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres \
-c "DROP DATABASE IF EXISTS \"$UUID\";"
echo "Dropping PostgreSQL user: $ODOO_DB_USER (if exists)..."
PGPASSWORD="$POSTGRES_ADMIN_PASSWORD" psql \
-h "$POSTGRES_HOST" -U "$POSTGRES_ADMIN_USER" -p "$POSTGRES_PORT" -d postgres <<EOF
DO
\$do\$
BEGIN
IF EXISTS (
SELECT
FROM pg_catalog.pg_user
WHERE usename = '${ODOO_DB_USER}') THEN
-- Drop user safely
EXECUTE 'DROP USER "' || '${ODOO_DB_USER}' || '"';
END IF;
END
\$do\$;
EOF
echo "✅ Database '$UUID' and user '$ODOO_DB_USER' removed (if they existed)."

68
app/sbin/nukeContainer Executable file
View File

@@ -0,0 +1,68 @@
#!/bin/bash
# Usage: ./nukeContainer <uuid>
# Example: ./nukeContainer abc-001-xxxx-xxxx-xxxx
exec > /4server/data/log/nukeContainer.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') Nuke container $1"
source /4server/sbin/helpers
BIN_PATH="/4server/sbin"
UUID="$1"
if [[ -z "$UUID" ]]; then
echo "Usage: $0 <uuid>"
exit 1
fi
get_container_status() {
local uuid="$1"
# Get the container ID or name matching the UUID
CONTAINER_ID=$(docker ps -a --filter "name=$uuid" --format "{{.ID}}")
if [[ -z "$CONTAINER_ID" ]]; then
echo "not_found"
return
fi
STATUS=$(docker inspect -f '{{.State.Status}}' "$CONTAINER_ID")
echo "$STATUS"
}
# Check if container exists
STATUS=$(get_container_status "$UUID")
if [[ "$STATUS" == "running" ]]; then
echo "Container $UUID is still running. Aborting deletion."
exit 2
fi
get_contract_info
# Extract the second part of UUID (split by "-")
SECOND_PART=$(echo "$UUID" | cut -d'-' -f2)
# Decide which script to run
case "$SECOND_PART" in
001)
"$BIN_PATH/nuke/n8n"
;;
002)
"$BIN_PATH/nuke/ODOO_19"
;;
*)
echo "Unknown UUID type: $SECOND_PART"
exit 2
;;
esac
#sqlite3 "/4server/data/contracts.db" <<SQL
#DELETE FROM containers WHERE UUID='$UUID';
#SQL
echo "Container $UUID successfully nuked."

14
app/sbin/pullAllContainers Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
exec > /4server/data/log/system_pull.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') Pulling images"
docker ps -a --format '{{.Image}}' \
| grep -vE '^[0-9a-f]{12,}$' \
| sort -u \
| xargs -n1 docker pull
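For reference, a dry-run sketch of the same pipeline (an assumption, not part of the script): replace the final pull with echo to see which images would be refreshed.
docker ps -a --format '{{.Image}}' \
    | grep -vE '^[0-9a-f]{12,}$' \
    | sort -u \
    | xargs -n1 echo docker pull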

127
app/sbin/start/ODOO_17 Executable file
View File

@@ -0,0 +1,127 @@
#!/bin/bash
echo "START ODOO 17"
# Load functions
source /4server/sbin/ODOO_19/ODOO_19.lib
source /4server/sbin/helpers
get_contract_info
# Config variables
UUID="${UUID:-default}"
BRANCH="${BRANCH:-release}"
ODOO_DB_USER="${UUID}"
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
echo "ENV: $HDD $DOMAIN_COUNT $BACKUP_SLOTS $CONTAINERDBID"
echo "DBID: $CONTAINERDBID"
BASEURL="${BASEURL:-/4server/data/$UUID}"
DATA_DIR="$BASEURL/odoo/"
CUSTOM_DIR="$BASEURL/git/$UUID/custom/"
ENTERPRISE_DIR="$BASEURL/git/$UUID/enterprise/"
LOGS_DIR="$BASEURL/logs/"
CONFIG_DIR="$BASEURL/config/"
CC_DIR="$BASEURL/cc/"
BACKUP_DIR="/BACKUP/$UUID"
GIT_DIR="$BASEURL/git-server/"
INSTALL_DIR="$BASEURL/install/"
SSH_DIR="$BASEURL/.ssh/"
ETC_DIR="$BASEURL/etc/"
SERVER_IP=$(ip -4 addr show eth0 | awk '/inet/ {print $2}' | cut -d/ -f1 | head -n1)
LABEL_DOMAINS=("$UUID.odoo4projects.com")
if [ -f "$BASEURL/etc/domain" ]; then
while IFS= read -r domain || [[ -n $domain ]]; do
[ -z "$domain" ] && continue
LABEL_DOMAINS+=("$domain")
done < "$BASEURL/etc/domain"
else
echo "[DEBUG] No additional domain file found at $BASEURL/etc/domain"
fi
RULE=""
for d in "${LABEL_DOMAINS[@]}"; do
RESOLVED_IP=$(nslookup "$d" 2>/dev/null | awk '/^Address: / { print $2 }' | head -n1 || true)
if [ -z "$RESOLVED_IP" ]; then
echo "[DEBUG] Could not resolve $d, skipping"
continue
fi
if [ "$RESOLVED_IP" = "$SERVER_IP" ]; then
RULE+=" || Host(\`$d\`)"
else
echo "[DEBUG] Skipping $d (does not match SERVER_IP $SERVER_IP)"
fi
done
RULE="${RULE# || }"
DOMAIN_LABEL="traefik.http.routers.$UUID.rule=$RULE"
echo "[DEBUG] Final Traefik label: $DOMAIN_LABEL"
docker exec "$UUID" mkdir -p /var/lib/odoo/.local/share/Odoo/
docker exec "$UUID" ln -s /home/odoo/.local/share/Odoo/filestore /var/lib/odoo/.local/share/Odoo/filestore
find "$BASEURL" -type d -exec chmod 777 {} \;
PORT=$((CONTAINERDBID + 2200))
echo "PORT $PORT"
mkdir -p ${ETC_DIR}
echo "GITPATH ${ETC_DIR}gitpath"
touch ${ETC_DIR}gitpath
mkdir -p ${GIT_DIR}keys
touch ${GIT_DIR}keys/id_rsa.pub
echo "git clone \"ssh://git@${UUID}.odoo4projects.com:${PORT}/git-server/repos/odoo.git\"" > "${ETC_DIR}/gitpath"
docker stop "$UUID" 2>/dev/null
docker rm "$UUID" 2>/dev/null
EXTRA_DOCKER_PARAMETER=""
docker run -d --name "$UUID" \
--network 4server_4projects \
--restart=always \
$EXTRA_DOCKER_PARAMETER \
-v "$DATA_DIR/odoo-web-data:/home/odoo/.local/share/Odoo" \
-v "$CUSTOM_DIR:/mnt/addons/custom" \
-v "$ENTERPRISE_DIR:/mnt/addons/enterprise" \
-v "$LOGS_DIR:/mnt/logs" \
-v "$CC_DIR:/mnt/cc" \
-v "$BACKUP_DIR:/mnt/backup" \
-v "$GIT_DIR:/git-server" \
-v "$INSTALL_DIR:/mnt/install" \
-v "$SSH_DIR:/etc/sshkey" \
-v "$ETC_DIR:/mnt/etc" \
-p "$PORT:22" \
-e HOST="beedb" \
-e USER="$ODOO_DB_USER" \
-e PASSWORD="$ODOO_DB_PASSWORD" \
-e UUID="$UUID" \
-e HDD="$HDD" \
-e DOMAIN_COUNT="$DOMAIN_COUNT" \
-e BACKUP_SLOTS="$BACKUP_SLOTS" \
-e WORKER="$WORKER" \
-e GIT="$GIT" \
--label "$DOMAIN_LABEL" \
--label "traefik.http.services.$UUID.loadbalancer.server.port=8069" \
--label "traefic.http.routers.$UUID.entrypoints=web, websecure" \
--label "traefik.http.routers.$UUID.tls.certresolver=production" \
--label "traefik.http.routers.$UUID.tls=true" \
--label "traefik.http.routers.$UUID.service=$UUID" \
docker.odoo4projects.com/4projects/odoo_17:$BRANCH
docker exec "$UUID" mkdir -p /var/lib/odoo/.local/share/Odoo
docker exec "$UUID" rm -rf /var/lib/odoo/.local/share/Odoo/filestore
docker exec "$UUID" ln -s /home/odoo/.local/share/Odoo/filestore /var/lib/odoo/.local/share/Odoo/filestore
docker exec $UUID chown -R odoo:odoo /home/odoo/.local
docker exec $UUID chown -R odoo:odoo /var/lib/odoo/.local/share/Odoo
docker exec $UUID chown -R odoo:odoo /mnt/*
docker exec $UUID chown odoo:odoo /git-server/keys/id_rsa.pub
check_and_create_db
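A standalone sketch of the rule-building loop above, with made-up domains, to show the label value Traefik ends up with:
LABEL_DOMAINS=("abc-004-xxxx.odoo4projects.com" "erp.example.com")   # hypothetical domains
RULE=""
for d in "${LABEL_DOMAINS[@]}"; do
    RULE+=" || Host(\`$d\`)"
done
RULE="${RULE# || }"
echo "$RULE"
# -> Host(`abc-004-xxxx.odoo4projects.com`) || Host(`erp.example.com`)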

127
app/sbin/start/ODOO_18 Executable file
View File

@@ -0,0 +1,127 @@
#!/bin/bash
# Load functions
source /4server/sbin/ODOO_19/ODOO_19.lib
source /4server/sbin/helpers
get_contract_info
# Config variables
UUID="${UUID:-default}"
BRANCH="${BRANCH:-release}"
ODOO_DB_USER="${UUID}"
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
echo "ENV: $HDD $DOMAIN_COUNT $BACKUP_SLOTS $CONTAINERDBID"
echo "DBID: $CONTAINERDBID"
BASEURL="${BASEURL:-/4server/data/$UUID}"
DATA_DIR="$BASEURL/odoo/"
CUSTOM_DIR="$BASEURL/git/$UUID/custom/"
ENTERPRISE_DIR="$BASEURL/git/$UUID/enterprise/"
LOGS_DIR="$BASEURL/logs/"
CONFIG_DIR="$BASEURL/config/"
CC_DIR="$BASEURL/cc/"
BACKUP_DIR="/BACKUP/$UUID"
GIT_DIR="$BASEURL/git-server/"
INSTALL_DIR="$BASEURL/install/"
SSH_DIR="$BASEURL/.ssh/"
ETC_DIR="$BASEURL/etc/"
SERVER_IP=$(ip -4 addr show eth0 | awk '/inet/ {print $2}' | cut -d/ -f1 | head -n1)
LABEL_DOMAINS=("$UUID.odoo4projects.com")
if [ -f "$BASEURL/etc/domain" ]; then
while IFS= read -r domain || [[ -n $domain ]]; do
[ -z "$domain" ] && continue
LABEL_DOMAINS+=("$domain")
done < "$BASEURL/etc/domain"
else
echo "[DEBUG] No additional domain file found at $BASEURL/etc/domain"
fi
RULE=""
for d in "${LABEL_DOMAINS[@]}"; do
RESOLVED_IP=$(nslookup "$d" 2>/dev/null | awk '/^Address: / { print $2 }' | head -n1 || true)
if [ -z "$RESOLVED_IP" ]; then
echo "[DEBUG] Could not resolve $d, skipping"
continue
fi
if [ "$RESOLVED_IP" = "$SERVER_IP" ]; then
RULE+=" || Host(\`$d\`)"
else
echo "[DEBUG] Skipping $d (does not match SERVER_IP $SERVER_IP)"
fi
done
RULE="${RULE# || }"
DOMAIN_LABEL="traefik.http.routers.$UUID.rule=$RULE"
echo "[DEBUG] Final Traefik label: $DOMAIN_LABEL"
docker exec "$UUID" mkdir -p /var/lib/odoo/.local/share/Odoo/
docker exec "$UUID" ln -s /home/odoo/.local/share/Odoo/filestore /var/lib/odoo/.local/share/Odoo/filestore
find "$BASEURL" -type d -exec chmod 777 {} \;
PORT=$((CONTAINERDBID + 2200))
echo "PORT $PORT"
mkdir -p ${ETC_DIR}
echo "GITPATH ${ETC_DIR}gitpath"
touch ${ETC_DIR}gitpath
mkdir -p ${GIT_DIR}keys
touch ${GIT_DIR}keys/id_rsa.pub
echo "git clone \"ssh://git@${UUID}.odoo4projects.com:${PORT}/git-server/repos/odoo.git\"" > "${ETC_DIR}/gitpath"
docker stop "$UUID" 2>/dev/null
docker rm "$UUID" 2>/dev/null
EXTRA_DOCKER_PARAMETER=""
docker run -d --name "$UUID" \
--network 4server_4projects \
--restart=always \
$EXTRA_DOCKER_PARAMETER \
-v "$DATA_DIR/odoo-web-data:/home/odoo/.local/share/Odoo" \
-v "$CUSTOM_DIR:/mnt/addons/custom" \
-v "$ENTERPRISE_DIR:/mnt/addons/enterprise" \
-v "$LOGS_DIR:/mnt/logs" \
-v "$CC_DIR:/mnt/cc" \
-v "$BACKUP_DIR:/mnt/backup" \
-v "$GIT_DIR:/git-server" \
-v "$INSTALL_DIR:/mnt/install" \
-v "$SSH_DIR:/etc/sshkey" \
-v "$ETC_DIR:/mnt/etc" \
-p "$PORT:22" \
-e HOST="beedb" \
-e USER="$ODOO_DB_USER" \
-e PASSWORD="$ODOO_DB_PASSWORD" \
-e UUID="$UUID" \
-e HDD="$HDD" \
-e DOMAIN_COUNT="$DOMAIN_COUNT" \
-e BACKUP_SLOTS="$BACKUP_SLOTS" \
-e WORKER="$WORKER" \
-e GIT="$GIT" \
--label "$DOMAIN_LABEL" \
--label "traefik.http.services.$UUID.loadbalancer.server.port=8069" \
--label "traefic.http.routers.$UUID.entrypoints=web, websecure" \
--label "traefik.http.routers.$UUID.tls.certresolver=production" \
--label "traefik.http.routers.$UUID.tls=true" \
--label "traefik.http.routers.$UUID.service=$UUID" \
docker.odoo4projects.com/4projects/odoo_18:$BRANCH
docker exec "$UUID" mkdir -p /var/lib/odoo/.local/share/Odoo
docker exec "$UUID" rm -rf /var/lib/odoo/.local/share/Odoo/filestore
docker exec "$UUID" ln -s /home/odoo/.local/share/Odoo/filestore /var/lib/odoo/.local/share/Odoo/filestore
docker exec $UUID chown -R odoo:odoo /home/odoo/.local
docker exec $UUID chown -R odoo:odoo /var/lib/odoo/.local/share/Odoo
docker exec $UUID chown -R odoo:odoo /mnt/*
docker exec $UUID chown odoo:odoo /git-server/keys/id_rsa.pub
check_and_create_db

120
app/sbin/start/ODOO_19 Executable file
View File

@@ -0,0 +1,120 @@
#!/bin/bash
# Load functions
source /4server/sbin/ODOO_19/ODOO_19.lib
source /4server/sbin/helpers
get_contract_info
# Config variables
UUID="${UUID:-default}"
BRANCH="${BRANCH:-release}"
ODOO_DB_USER="${UUID}"
export ODOO_DB_PASSWORD=$(echo "$SECRET" | jq -r '.psql')
echo "ENV: $HDD $DOMAIN_COUNT $BACKUP_SLOTS $CONTAINERDBID"
echo "DBID: $CONTAINERDBID"
BASEURL="${BASEURL:-/4server/data/$UUID}"
DATA_DIR="$BASEURL/odoo/"
CUSTOM_DIR="$BASEURL/git/$UUID/custom/"
ENTERPRISE_DIR="$BASEURL/git/$UUID/enterprise/"
LOGS_DIR="$BASEURL/logs/"
CONFIG_DIR="$BASEURL/config/"
CC_DIR="$BASEURL/cc/"
BACKUP_DIR="/BACKUP/$UUID"
GIT_DIR="$BASEURL/git-server/"
INSTALL_DIR="$BASEURL/install/"
SSH_DIR="$BASEURL/.ssh/"
ETC_DIR="$BASEURL/etc/"
SERVER_IP=$(ip -4 addr show eth0 | awk '/inet/ {print $2}' | cut -d/ -f1 | head -n1)
LABEL_DOMAINS=("$UUID.odoo4projects.com")
if [ -f "$BASEURL/etc/domain" ]; then
while IFS= read -r domain || [[ -n $domain ]]; do
[ -z "$domain" ] && continue
LABEL_DOMAINS+=("$domain")
done < "$BASEURL/etc/domain"
else
echo "[DEBUG] No additional domain file found at $BASEURL/etc/domain"
fi
RULE=""
for d in "${LABEL_DOMAINS[@]}"; do
RESOLVED_IP=$(nslookup "$d" 2>/dev/null | awk '/^Address: / { print $2 }' | head -n1 || true)
if [ -z "$RESOLVED_IP" ]; then
echo "[DEBUG] Could not resolve $d, skipping"
continue
fi
if [ "$RESOLVED_IP" = "$SERVER_IP" ]; then
RULE+=" || Host(\`$d\`)"
else
echo "[DEBUG] Skipping $d (does not match SERVER_IP $SERVER_IP)"
fi
done
RULE="${RULE# || }"
DOMAIN_LABEL="traefik.http.routers.$UUID.rule=$RULE"
echo "[DEBUG] Final Traefik label: $DOMAIN_LABEL"
find "$BASEURL" -type d -exec chmod 777 {} \;
PORT=$((CONTAINERDBID + 2200))
echo "PORT $PORT"
mkdir -p ${ETC_DIR}
echo "GITPATH ${ETC_DIR}gitpath"
touch ${ETC_DIR}gitpath
mkdir -p ${GIT_DIR}keys
touch ${GIT_DIR}keys/id_rsa.pub
echo "git clone \"ssh://git@${UUID}.odoo4projects.com:${PORT}/git-server/repos/odoo.git\"" > "${ETC_DIR}/gitpath"
docker stop "$UUID" 2>/dev/null
docker rm "$UUID" 2>/dev/null
EXTRA_DOCKER_PARAMETER=""
docker run -d --name "$UUID" \
--network 4server_4projects \
--restart=always \
$EXTRA_DOCKER_PARAMETER \
-v "$DATA_DIR/odoo-web-data:/home/odoo/.local/share/Odoo" \
-v "$CUSTOM_DIR:/mnt/addons/custom" \
-v "$ENTERPRISE_DIR:/mnt/addons/enterprise" \
-v "$LOGS_DIR:/mnt/logs" \
-v "$CC_DIR:/mnt/cc" \
-v "$BACKUP_DIR:/mnt/backup" \
-v "$GIT_DIR:/git-server" \
-v "$INSTALL_DIR:/mnt/install" \
-v "$SSH_DIR:/etc/sshkey" \
-v "$ETC_DIR:/mnt/etc" \
-p "$PORT:22" \
-e HOST="beedb" \
-e USER="$ODOO_DB_USER" \
-e PASSWORD="$ODOO_DB_PASSWORD" \
-e UUID="$UUID" \
-e HDD="$HDD" \
-e DOMAIN_COUNT="$DOMAIN_COUNT" \
-e BACKUP_SLOTS="$BACKUP_SLOTS" \
-e WORKER="$WORKER" \
-e GIT="$GIT" \
--label "$DOMAIN_LABEL" \
--label "traefik.http.services.$UUID.loadbalancer.server.port=8069" \
--label "traefic.http.routers.$UUID.entrypoints=web, websecure" \
--label "traefik.http.routers.$UUID.tls.certresolver=production" \
--label "traefik.http.routers.$UUID.tls=true" \
--label "traefik.http.routers.$UUID.service=$UUID" \
docker.odoo4projects.com/4projects/odoo_19:$BRANCH
docker exec $UUID chown -R odoo:odoo /home/odoo/.local
docker exec $UUID chown -R odoo:odoo /mnt/*
docker exec $UUID chown odoo:odoo /git-server/keys/id_rsa.pub
chmod 777 /4server/data/$UUID/cc
check_and_create_db

View File

@@ -1,6 +1,5 @@
 #!/usr/bin/env bash
-#--label "traefik.http.routers.${UUID}.middlewares=cors-headers@file" \
 echo "Start N8N container ${UUID}"
 # Get the hostname of the machine
@@ -8,10 +7,11 @@ HOSTNAME=$(hostname)
 mkdir -p /4server/data/${UUID}/n8n
 mkdir -p /4server/data/${UUID}/data
+mkdir -p /4server/data/${UUID}/backup
 chmod 777 /4server/data/${UUID}/n8n
 chmod 777 /4server/data/${UUID}/data
+chmod 777 /4server/data/${UUID}/backup
 # Stop the container if it exists
 if docker ps -a --format '{{.Names}}' | grep -q "^${UUID}$"; then
@@ -28,22 +28,23 @@ docker run -d \
   --cap-add=SYS_ADMIN \
   --security-opt seccomp=unconfined \
   --restart=always \
-  -e N8N_HOST="${UUID}.od8n.com" \
+  -e N8N_HOST="${UUID}.odoo4projects.com" \
   -e N8N_PORT=5678 \
   -e N8N_PROTOCOL=https \
   -e NODE_ENV=production \
-  -e WEBHOOK_URL="https://${UUID}.od8n.com/" \
+  -e WEBHOOK_URL="https://${UUID}.odoo4projects.com/" \
   -e GENERIC_TIMEZONE="UTC-3" \
+  -e N8N_CUSTOM_EXTENSIONS="/usr/local/share/n8n/custom" \
   -v "/4server/data/${UUID}/n8n:/home/node/.n8n" \
-  -v "/4server/data/${UUID}/data:/data" \
+  -v "/4server/data/${UUID}/backup:/data" \
+  -v "/4server/data/${UUID}/backup:/backup" \
   --label "traefik.enable=true" \
-  --label "traefik.http.routers.${UUID}.rule=Host(\`${UUID}.od8n.com\`)" \
+  --label "traefik.http.routers.${UUID}.rule=Host(\`${UUID}.odoo4projects.com\`)" \
   --label "traefik.http.routers.${UUID}.entrypoints=web,websecure" \
   --label "traefik.http.routers.${UUID}.tls=true" \
   --label "traefik.http.routers.${UUID}.tls.certresolver=production" \
   --label "traefik.http.services.${UUID}.loadbalancer.server.port=5678" \
   --network 4server_4projects \
-  n8nio/n8n:latest
+  docker.odoo4projects.com/4projects/n8n:release
 echo "Started $1"

View File

@@ -4,8 +4,8 @@
 exec > /4server/data/log/startContainer.log 2>&1
 echo "$(date '+%Y-%m-%d %H:%M') Start container $1"
-DB_PATH="/4server/data/contracts.db"
+source /4server/sbin/helpers
 BIN_PATH="/4server/sbin"
 UUID="$1"
@@ -15,31 +15,21 @@ if [[ -z "$UUID" ]]; then
     exit 1
 fi
-while IFS="=" read -r key value; do
-    export "$key=$value"
-done < <(sqlite3 "$DB_PATH" "
-SELECT 'UUID=' || UUID FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'EMAIL=' || email FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'EXPIRES=' || expires FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'TAGS=' || tags FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'ENV=' || env FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'AFFILIATE=' || affiliate FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'IMAGE=' || image FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'HISTORY=' || history FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'COMMENT=' || comment FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'DOMAINS=' || domains FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'STATUS=' || status FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'CREATED=' || created FROM containers WHERE UUID='$UUID'
-UNION ALL SELECT 'BUMP=' || bump FROM containers WHERE UUID='$UUID';
-")
-# Debug: print loaded environment variables
-env | grep -E 'UUID|EMAIL|EXPIRES|TAGS|ENV|AFFILIATE|IMAGE|HISTORY|COMMENT|DOMAINS|STATUS|CREATED|BUMP'
-echo "UUID ${UUID}"
+DOMAIN_FILE="/4server/data/$UUID/etc/domain"
+DB_FILE="/4server/data/contracts.db"
+if [ -f "$DOMAIN_FILE" ]; then
+    DOMAINS=$(paste -sd "," "$DOMAIN_FILE")
+    sqlite3 "$DB_FILE" <<SQL
+UPDATE containers
+SET domains='$DOMAINS'
+WHERE UUID='$UUID';
+SQL
+fi
+get_contract_info
 # Extract the second part of UUID (split by "-")
 SECOND_PART=$(echo "$UUID" | cut -d'-' -f2)
@@ -50,15 +40,23 @@ case "$SECOND_PART" in
         "$BIN_PATH/start/n8n"
         ;;
     002)
-        "$BIN_PATH/start/ODOO18"
+        "$BIN_PATH/start/ODOO_18"
         ;;
     003)
-        "$BIN_PATH/start/ODOO19"
+        "$BIN_PATH/start/ODOO_19"
         ;;
+    004)
+        "$BIN_PATH/start/ODOO_17"
+        ;;
     *)
         echo "Unknown UUID type: $SECOND_PART"
         exit 2
         ;;
 esac

6
app/sbin/stopContainer Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
exec > /4server/data/log/stopContainer.log 2>&1
echo "$(date '+%Y-%m-%d %H:%M') Stop container $1"
docker stop $1

8
app/templates/.profile Normal file
View File

@@ -0,0 +1,8 @@
# ~/.profile
clear
echo "Server {{HOSTNAME}}"
export PS1="\[\e[32m\]\h:\w\$\[\e[0m\] "
df -h .
cd /4server

View File

@@ -1 +1 @@
-API_KEY={API_KEY}
+API_KEY={{API_KEY}}

View File

@@ -5,11 +5,13 @@ services:
     restart: always
     environment:
       - POSTGRES_DB=postgres
-      - POSTGRES_PASSWORD=deradmin
-      - POSTGRES_USER=deradmin1
+      - POSTGRES_PASSWORD=ZpSwWNafyy9GhY2gzHw
+      - POSTGRES_USER=1gtT0sf8klB9lDbYZD9
     volumes:
-      - /4server/data/postgres:/var/lib/postgresql/data/
-      - /4server/data/pg_backup/:/BACKUP/
+      - /4server/data/postgres/data/:/var/lib/postgresql/data/
+      - /4server/data/postgres/pg_backup/:/BACKUP/
+      - /4server/data/postgres/etc/:/etc/postgresql/16/main/
     networks:
       4projects:
         ipv4_address: 10.5.0.200
@@ -20,7 +22,6 @@ services:
     ports:
       - 80:80
       - 443:443
-      - 8080:8080
     volumes:
       - /run/docker.sock:/run/docker.sock:ro
       - /4server/data/traefik/etc:/etc/traefik

View File

@@ -0,0 +1 @@
permit nopass 4server as root

View File

@@ -0,0 +1,10 @@
[DEFAULT]
bantime = 1h
findtime = 30m
maxretry = 1
[sshd]
enabled = true
port = ssh
filter = sshd
logpath = /var/log/auth.log
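A quick check once the jail is loaded (standard fail2ban CLI, shown only as an example):
fail2ban-client status sshd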

View File

@@ -0,0 +1,23 @@
SERIAL ttyS0 115200
DEFAULT menu.c32
PROMPT 0
MENU TITLE Alpine/Linux Boot Menu
MENU HIDDEN
MENU AUTOBOOT Alpine will be booted automatically in # seconds.
TIMEOUT 100
LABEL virt
MENU LABEL Linux virt
LINUX vmlinuz-virt
INITRD initramfs-virt
APPEND root=LABEL=/ modules=sd-mod,usb-storage,ext4 console=ttyS0,115200n8 console=ttyAMA0,115200n8
LABEL lts
MENU DEFAULT
MENU LABEL Linux lts
LINUX vmlinuz-lts
INITRD initramfs-lts
APPEND root=LABEL=/ modules=sd-mod,usb-storage,ext4 console=ttyS0,115200n8 console=ttyAMA0,115200n8
MENU SEPARATOR

View File

@@ -1 +1 @@
-{{hostname}}
+{{HOSTNAME}}

View File

@@ -1,6 +1,6 @@
 #!/sbin/openrc-run
-name="4server-api"
+name="api"
 description="4server API Service"
 # Command uses Python inside the venv
@@ -9,10 +9,13 @@ command_args=""
 pidfile="/run/${RC_SVCNAME}.pid"
 command_background="yes"
+respawn_delay=5   # seconds to wait before restart
+respawn_max=0     # 0 = unlimited restarts
 # Load environment variables if needed
-if [ -f /etc/od8n ]; then
-    . /etc/od8n
-    export $(cut -d= -f1 /etc/od8n)
+if [ -f /etc/4server ]; then
+    . /etc/4server
+    export $(cut -d= -f1 /etc/4server)
 fi
 # Logs

30
app/templates/init.d/checkCalls Executable file
View File

@@ -0,0 +1,30 @@
#!/sbin/openrc-run
name="checkCalls"
description="check container service calls"
# Command uses Python inside the venv
command="/4server/sbin/checkCalls"
command_args=""
pidfile="/run/${RC_SVCNAME}.pid"
command_background="yes"
directory="/4server"
command_user="root"
respawn_delay=5 # seconds to wait before restart
respawn_max=0 # 0 = unlimited restarts
# Load environment variables if needed
output_log="/4server/data/log/checkCalls.log"
error_log="/4server/data/log/checkCalls.log"
depend() {
need net
use logger dns
after firewall
}

17
app/templates/init.d/cleanTmp Executable file
View File

@@ -0,0 +1,17 @@
#!/sbin/openrc-run
# OpenRC service for /4server/sbin/cleanTmp
name="cleanTmp"
description="Looping /tmp cleaner that removes files older than 2 days"
command="/4server/sbin/cleanTmp"
command_background="yes"
pidfile="/run/${RC_SVCNAME}.pid"
output_log="/4server/data/log/checkCalls.log"
error_log="/4server/data/log/checkCalls.log"
depend() {
need localmount
after bootmisc
}

17
app/templates/init.d/cpu Executable file
View File

@@ -0,0 +1,17 @@
#!/sbin/openrc-run
# OpenRC service for /4server/sbin/cpu
name="cpu"
description="Logs cpu usage"
command="/4server/sbin/cpu"
command_background="yes"
pidfile="/run/cpu.pid"
output_log="/4server/data/log/cpu.log"
error_log="/4server/data/log/cpu.log"
depend() {
need localmount
after bootmisc
}

View File

@@ -0,0 +1 @@
{{NEBULA_CA}}

View File

@@ -20,7 +20,7 @@ relay:
 tun:
   disabled: false
-  dev: nebula1
+  dev: nebula2
   drop_local_broadcast: false
   drop_multicast: false
   tx_queue: 500
@@ -50,18 +50,14 @@ firewall:
   inbound:
     - port: any #ping
       proto: icmp
-      host: any
+      groups:
+        - admin
     - port: 22 #GIT
       proto: tcp
      groups:
        - admin
-        - ansible
-    - port: 8080
-      proto: tcp
-      groups:
-        -admin

View File

@@ -0,0 +1 @@
{{NEBULA_CRT}}

View File

@@ -0,0 +1 @@
{{NEBULA_KEY}}

View File

@@ -0,0 +1,3 @@
# Example: allow your host to connect to all DBs as any user with password
host all all 10.5.0.1/32 md5

View File

@@ -0,0 +1 @@
{{SSH_PRIVATE}}

View File

@@ -0,0 +1 @@
{{SSH_PUBLIC}}

View File

@@ -6,9 +6,9 @@ accesslog:
   filePath: /var/log/traefik/access.log
 api:
-  dashboard: true
+  dashboard: false
   disableDashboardAd: true
-  insecure: true
+  insecure: false
 entryPoints:
   web:
@@ -29,9 +29,6 @@ entryPoints:
     readTimeout: 0
     writeTimeout: 0
     idleTimeout: 42
-  # -- (Optional) Add custom Entrypoint
-  # custom:
-  #   address: :8080
 # -- Configure your CertificateResolver here...
 certificatesResolvers:
@@ -83,18 +80,22 @@ http:
         address: http://bouncer-traefik:8080/api/v1/forwardAuth
         trustForwardHeader: true
+    cors-headers:
+      headers:
+        accessControlAllowCredentials: true
   routers:
-    saopaulo-router:
-      rule: "Host(`dev.local`)"
-      service: saopaulo-service
+    api-router:
+      rule: "Host(`{{HOSTNAME}}.odoo4projects.com`)"
+      service: api-service
       entryPoints:
         - websecure
       tls:
         certResolver: production
   services:
-    saopaulo-service:
+    api-service:
       loadBalancer:
         servers:
           - url: "http://10.5.0.1:8888"
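A smoke-test sketch for the new api-router (an assumption; the hostname below stands for whatever {{HOSTNAME}} renders to): a request matching the Host rule should be proxied to the API at http://10.5.0.1:8888.
curl -kI "https://<hostname>.odoo4projects.com/"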

View File

@@ -5,8 +5,8 @@
 # ========= CONFIG =========
 API_KEY="your-secret-api-key"
-BASE_URL="https://dev.local"
-CONTAINER_ID="001-001-123e4567-e89b-12d3-a456-426614174000" # sample UUID
+CONTAINER_ID="001-002-123e4567-e89b-12d3-a456-426614174000" # sample UUID
+BASE_URL="https://dev.odoo4projects.com"
 # ==========================
 # --- Functions for each endpoint ---
@@ -28,7 +28,14 @@ update_container() {
     "tags": "N8N, SQLITE",
     "affiliate": "OLIVER",
     "domains": "N8N.local",
-    "status":"hot"
+    "status":"hot",
+    "env": {
+      "BRANCH": "release"
+    },
+    "secret": {
+      "psql": "5Sg9gDxYQH44QNSPyOx"
+    }
   }'
 }

View File

@@ -3,48 +3,94 @@
 ### SYSTEM SETUP
 rex doas mkdir -p /4server
 rex doas chmod 777 /4server
+rex doas chown 4server:4server /4server
 rex mkdir -p /4server/data/log
 template templates/hosts /etc/hosts
+### BACKUP DIR
+rex doas mkdir -p /BACKUP
+rex doas chmod 777 /BACKUP
+rex doas chown 4server:4server /BACKUP
+### TMP DIR
+rex doas mkdir -p /4server/tmp
+rex doas chmod 777 /4server/tmp
+rex doas chown 4server:4server /4server/tmp
+template templates/.profile /home/4server/.profile
 ### PACKAGES
 template templates/repositories /etc/apk/repositories
-rex doas apk update && upgrade
+rex "doas apk update && doas apk upgrade"
-rex doas apk add python3 build-base python3-dev linux-headers py3-pip gcc g++ musl-dev libffi-dev make jq rsync mc vim docker docker-compose htop linux-lts sqlite bash postgresql16-client
+rex doas apk add iperf linux-lts openssh ufw python3 build-base python3-dev linux-headers py3-pip gcc g++ musl-dev libffi-dev make jq rsync mc vim docker docker-compose htop linux-lts sqlite bash postgresql16-client
-rex doas pip install --break-system-packages --no-cache-dir "uvicorn[standard]" fastapi pydantic psutil
+rex doas pip install --root-user-action ignore --break-system-packages --no-cache-dir "uvicorn[standard]" fastapi pydantic psutil gdown
 ### own bins
-prsync -h "$hosts_file" -avz ./sbin/ /4server/sbin/
+echo "Running prsync ./sbin"
+prsync -h "/app/host_vars/hosts" -avz ./sbin/ /4server/sbin/
+### POSTGRES
+rex mkdir -p /4server/data/postgres/etc
+template templates/pq_hba.conf /4server/data/postgres/etc/
 ### API
 #INSTALL API KEYS
 template templates/4server /etc/4server
+rex doas chown root:root /etc/4server
+rex doas chmod 600 /etc/4server
 #INSTALL API SERVICE
-template templates/init.d/4server-api /etc/init.d/4server-api
-rex doas chmod 0755 /etc/init.d/4server-api
-rex doas chown root:root /etc/init.d/4server-api
-rex doas rc-update add 4server-api default
-rex doas rc-service 4server-api restart
+template templates/init.d/api /etc/init.d/api
+rex doas chmod 0755 /etc/init.d/api
+rex doas chown root:root /etc/init.d/api
+rex doas rc-update add api default
+rex doas rc-service api restart
+#INSTALL checkCalls SERVICE
+template templates/init.d/checkCalls /etc/init.d/checkCalls
+rex doas chmod 0755 /etc/init.d/checkCalls
+rex doas chown root:root /etc/init.d/checkCalls
+rex doas rc-update add checkCalls default
+rex doas rc-service checkCalls restart
+#INSTALL cleanTmp SERVICE
+template templates/init.d/cleanTmp /etc/init.d/cleanTmp
+rex doas chmod 0755 /etc/init.d/cleanTmp
+rex doas chown root:root /etc/init.d/cleanTmp
+rex doas rc-update add cleanTmp default
+rex doas rc-service cleanTmp restart
+#INSTALL cpu service
+template templates/init.d/cpu /etc/init.d/cpu
+rex doas chmod 0755 /etc/init.d/cpu
+rex doas chown root:root /etc/init.d/cpu
+rex doas rc-update add cpu default
+rex doas rc-service cpu restart
 ### Infrastructure
 ##### Docker
 rex doas rc-service docker start
-rex doas rc-update add docker boot
+rex doas rc-update add docker default
+rex doas rc-update del docker boot
+#LOGIN ODOO4PROJECTS DOCKER REPO
+rex "echo 'Airbus12docker' | doas docker login docker.odoo4projects.com -u admin --password-stdin"
 rex mkdir -p /4server/data/traefik/etc
 template templates/traefik.yaml /4server/data/traefik/etc/traefik.yaml
 rex mkdir -p /4server/data/traefik/etc/certs
-prsync -h "$hosts_file" -avz ./etc/certs/* /4server/data/traefik/etc/certs/
+echo "prsync traefik certs"
+prsync -h "/app/host_vars/hosts" -avz ./etc/traefik/certs/* /4server/data/traefik/etc/certs/
 template templates/docker-compose.yml /4server/docker-compose.yml
 rex doas docker-compose -f /4server/docker-compose.yml up -d --force-recreate
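A post-install check sketch (an assumption, using the repo's own rex wrapper) to confirm the services and the Docker stack came up on every host:
rex doas rc-service api status
rex doas rc-service checkCalls status
rex doas docker ps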

9
app/update_sbin Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/bash
echo "Running prsync ./sbin"
prsync -h "/app/host_vars/hosts" -avz ./sbin/ /4server/sbin/
rex doas rc-service api restart
rex doas rc-service checkCalls restart

35
app/vault/close Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/sh
set -euo pipefail
MAPPER_NAME="host_vars_crypt"
MOUNT_POINT="/app/host_vars"
# Unmount if mounted
if mountpoint -q "$MOUNT_POINT"; then
echo "Unmounting $MOUNT_POINT..."
umount "$MOUNT_POINT"
else
echo "$MOUNT_POINT is not mounted."
fi
if cryptsetup status "$MAPPER_NAME" >/dev/null 2>&1; then
echo "Closing stale mapping $MAPPER_NAME..."
if ! cryptsetup close "$MAPPER_NAME"; then
echo "cryptsetup close failed, forcing dmsetup remove..."
dmsetup remove --force --retry "$MAPPER_NAME" || true
fi
fi
# Close the LUKS/dm-crypt device if open
if [ -e "/dev/mapper/$MAPPER_NAME" ]; then
echo "Closing /dev/mapper/$MAPPER_NAME..."
cryptsetup close "$MAPPER_NAME"
else
echo "Mapper $MAPPER_NAME is not active."
fi
echo "Vault is now closed."

45
app/vault/create Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/sh
set -euo pipefail
VAULT_DIR="/app/vault"
VAULT_FILE="$VAULT_DIR/host_vars.img"
MAPPER_NAME="host_vars_crypt"
MOUNT_POINT="/app/host_vars"
SIZE_MB=25
# Prepare directories
mkdir -p "$VAULT_DIR"
mkdir -p "$MOUNT_POINT"
# Create the ${SIZE_MB} MB backing file if it doesn't exist
if [ ! -f "$VAULT_FILE" ]; then
echo "Creating $SIZE_MB MB vault file at $VAULT_FILE"
dd if=/dev/zero of="$VAULT_FILE" bs=1M count=$SIZE_MB
fi
# Setup LUKS encryption if not already formatted
if ! cryptsetup isLuks "$VAULT_FILE"; then
echo "Formatting with LUKS (you will be prompted for a passphrase)..."
cryptsetup luksFormat "$VAULT_FILE"
fi
# Open the encrypted volume
if ! [ -e "/dev/mapper/$MAPPER_NAME" ]; then
echo "Opening encrypted volume..."
cryptsetup open "$VAULT_FILE" "$MAPPER_NAME"
fi
# Create filesystem if not already present
if ! blkid /dev/mapper/"$MAPPER_NAME" >/dev/null 2>&1; then
echo "Creating ext4 filesystem..."
mkfs.ext4 /dev/mapper/"$MAPPER_NAME"
fi
# Mount it
if ! mountpoint -q "$MOUNT_POINT"; then
echo "Mounting at $MOUNT_POINT"
mount /dev/mapper/"$MAPPER_NAME" "$MOUNT_POINT"
fi
echo "Encrypted volume is ready and mounted at $MOUNT_POINT"

BIN
app/vault/host_vars.img Normal file

Binary file not shown.

50
app/vault/open Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/sh
set -euo pipefail
VAULT_FILE="/app/vault/host_vars.img"
MAPPER_NAME="host_vars_crypt"
MOUNT_POINT="/app/host_vars"
LOOP_DEVICE="/dev/loop50"
mkdir -p "$MOUNT_POINT"
# Always close if active
if cryptsetup status "$MAPPER_NAME" >/dev/null 2>&1; then
echo "Closing stale mapping $MAPPER_NAME..."
cryptsetup close "$MAPPER_NAME"
fi
# Detach loop device if already in use
if losetup "$LOOP_DEVICE" >/dev/null 2>&1; then
echo "Detaching stale loop device $LOOP_DEVICE..."
losetup -d "$LOOP_DEVICE"
fi
# Create loop device if missing
if [ ! -e "$LOOP_DEVICE" ]; then
echo "Creating loop device $LOOP_DEVICE..."
mknod "$LOOP_DEVICE" b 7 50
chmod 660 "$LOOP_DEVICE"
fi
# Attach vault file to loop device
echo "Attaching $VAULT_FILE to $LOOP_DEVICE..."
losetup "$LOOP_DEVICE" "$VAULT_FILE"
# Open encrypted volume
echo "Opening encrypted volume..."
cryptsetup open "$LOOP_DEVICE" "$MAPPER_NAME"
# Format if needed
if ! blkid "/dev/mapper/$MAPPER_NAME" >/dev/null 2>&1; then
echo "No filesystem found, creating ext4..."
mkfs.ext4 "/dev/mapper/$MAPPER_NAME"
fi
# Mount
echo "Mounting at $MOUNT_POINT..."
mount "/dev/mapper/$MAPPER_NAME" "$MOUNT_POINT"
echo "Vault is mounted at $MOUNT_POINT"

View File

@@ -4,6 +4,7 @@ services:
       context: ./alpine
     volumes:
       - ./app:/app
+      - ./exchange:/app/exchange
     tty: true
     privileged: true
     extra_hosts:

15
start
View File

@@ -1,2 +1,15 @@
 #!/bin/bash
-docker compose run alpine /bin/bash
+SERVICE_NAME="alpine"
+
+# Check if the container is running
+RUNNING=$(docker compose ps -q $SERVICE_NAME)
+
+if [ -z "$RUNNING" ]; then
+    echo "Container not running. Starting..."
+    docker compose up -d $SERVICE_NAME
+fi
+
+# Connect to the running container
+docker compose exec -it $SERVICE_NAME sh