Compare commits


No commits in common. "test" and "main" have entirely different histories.
test ... main

29 changed files with 1622 additions and 0 deletions

167
Ansible/playbook-pi3.yml Executable file

@@ -0,0 +1,167 @@
---
#
# Configure the pi3 from scratch
#
# Prerequisites:
# ssh configured
#
#
- name: Configure the pi3 from scratch
hosts: pi3
remote_user: griffix
roles:
- geerlingguy.docker
become: true
tasks:
# Add the docker group
- name: Create the docker group
ansible.builtin.group:
name: docker
state: present
become: true
- name: Ensure group "media" exists
ansible.builtin.group:
name: media
state: present
become: true
# Add the user griffix to the docker and media groups
- name: Add the user griffix to the docker and media groups
ansible.builtin.user:
name: griffix
groups: docker,media
become: true
# # Mount the external 8 TB HDD
# #UUID=8d9ea59e-4f9a-48d8-94bc-9604f7c131a4 /media/Seagate ext4 defaults,auto,users,rw,nofail,noatime 0 0
# - name: Mount up device by UUID
# ansible.posix.mount:
# path: /media/Seagate
# src: UUID=8d9ea59e-4f9a-48d8-94bc-9604f7c131a4
# fstype: ext4
# opts: defaults,auto,users,rw,nofail,noatime
# state: present
# become: true
# # Mount the 500 GB SSD
# #UUID=6177babe-d97c-4837-a326-1e557a5a3119 /temp ext4 defaults,auto,users,rw,nofail,noatime 0 0
# - name: Mount up device by UUID
# ansible.posix.mount:
# path: /temp
# src: UUID=6177babe-d97c-4837-a326-1e557a5a3119
# fstype: ext4
# opts: defaults,auto,users,rw,nofail,noatime
# state: present
# become: true
# Reboot so the new fstab entries are taken into account
# - name: Reboot machine and send a message
# ansible.builtin.reboot:
# msg: "Reboot du pi5 dans 5 secondes pour prise en compte des ajouts de disques en Fstab"
# become: true
- name: Refresh the apt cache first
ansible.builtin.apt:
update_cache: yes
become: true
- name: Upgrade already-installed packages
ansible.builtin.apt:
name: "*"
state: latest
become: true
- name: Install the required packages
ansible.builtin.apt:
pkg:
- fish
- docker
- btop
- vim
- nfs-kernel-server
become: true
# Convenience alias for local logins
- name: Update root's .profile
lineinfile:
dest: ~/.profile
state: present
line: "alias ll='ls -alrth'"
become: true
- name: Update griffix's .profile
lineinfile:
dest: ~/.profile
state: present
line: "alias ll='ls -alrth'"
# #Share Seagate
# - name: Share seagate
# lineinfile:
# dest: /etc/exports
# state: present
# line: "/media/Seagate 192.168.1.0/24(rw,all_squash,insecure,async,no_subtree_check,anonuid=1000,anongid=1001)"
# become: true
# #share Temp
# - name: share temp
# lineinfile:
# dest: /etc/exports
# state: present
# line: "/temp 192.168.1.0/24(rw,all_squash,insecure,async,no_subtree_check,anonuid=1000,anongid=1001)"
# become: true
# - name: Reboot machine and send a message
# ansible.builtin.reboot:
# msg: "Reboot du pi5 dans 5 secondes pour prise en compte des installations et"
# become: true
# - name: Copy file with owner and permissions
# ansible.builtin.copy:
# src: /media/Seagate/Ansible_conf/config/
# dest: /config
# owner: griffix
# group: media
# mode: '0766'
# - name: Create directory for Docker CLI plugins
# file:
# path: /usr/local/lib/docker/cli-plugins
# state: directory
# mode: '0755'
# become: true
# - name: Download Docker Compose V2 binary
# get_url:
# url: https://github.com/docker/compose/releases/download/v2.34.0/docker-compose-linux-aarch64
# dest: /usr/local/lib/docker/cli-plugins/docker-compose
# mode: '0755'
# become: true
# - name: Make Docker Compose binary executable
# file:
# path: /usr/local/lib/docker/cli-plugins/docker-compose
# mode: '0755'
# become: true
# - name: Verify Docker Compose installation
# command: docker compose version
# register: docker_compose_version
# ignore_errors: yes
# - name: Show Docker Compose version
# debug:
# msg: "{{ docker_compose_version.stdout }}"
# when: docker_compose_version.rc == 0
- name: Create and deploy docker compose services
community.docker.docker_compose_v2:
project_src: /config/
register: output
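
A minimal sketch of how this playbook might be run from the Ansible control machine, assuming an inventory file named hosts.ini that defines the pi3 host (the inventory name is an assumption, not part of this repository):

# install the role and collections the play relies on, then run it
ansible-galaxy role install geerlingguy.docker
ansible-galaxy collection install community.docker ansible.posix
# --ask-become-pass prompts for the sudo password of remote_user griffix
ansible-playbook -i hosts.ini Ansible/playbook-pi3.yml --ask-become-pass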

24
Ansible/playbook-pi4-backup.yml Executable file

@@ -0,0 +1,24 @@
---
#
# Back up the pi5
#
- name: Back up the pi5
hosts: pi5
remote_user: griffix
tasks:
- name: stop docker compose services
community.docker.docker_compose_v2:
project_src: /config/
state: stopped
register: output
- synchronize:
src: /config/
dest: /media/Seagate/backups/pi5
delegate_to: "{{ inventory_hostname }}"
- name: re-deploy docker compose services
community.docker.docker_compose_v2:
project_src: /config/
register: output

24
Ansible/playbook-pi5-backup.yml Executable file

@@ -0,0 +1,24 @@
---
#
# Back up the pi5
#
- name: Back up the pi5
hosts: pi5
remote_user: griffix
tasks:
- name: stop docker compose services
community.docker.docker_compose_v2:
project_src: /config/
state: stopped
register: output
- synchronize:
src: /config/
dest: /media/Seagate/backups/pi5
delegate_to: "{{ inventory_hostname }}"
- name: re-deploy docker compose services
community.docker.docker_compose_v2:
project_src: /config/
register: output
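
The synchronize task above is delegated to the pi5 itself, so it boils down to a local rsync on that host; a rough manual equivalent, assuming the same paths (flags approximate the module's archive/compress defaults):

# copy the compose configuration into the backup area on the Seagate drive
rsync -az /config/ /media/Seagate/backups/pi5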

48
Ansible/playbook-pi5-end.yml Executable file

@@ -0,0 +1,48 @@
---
#
# Final pi5 configuration step: restore /config and deploy the compose services
#
# Prerequisites:
# ssh configured
#
#
- name: Final pi5 configuration step
hosts: pi5
remote_user: griffix
tasks:
- name: Create the /config directory
ansible.builtin.file:
path: /config
state: directory
owner: griffix
group: docker
mode: '0775'
become: true
# - name: Copy file with owner and permissions
# ansible.builtin.copy:
# src: /media/Seagate/Ansible_conf/config/
# dest: /config/
# owner: griffix
# group: media
# mode: '0766'
- synchronize:
src: /media/Seagate/Ansible_conf/config/
dest: /config/
delegate_to: "{{ inventory_hostname }}"
become: true
- name: Recursively change ownership of a directory
ansible.builtin.file:
path: /config
state: directory
recurse: yes
owner: griffix
group: docker
- name: create and deploy docker compose services
community.docker.docker_compose_v2:
project_src: /config/
register: output

143
Ansible/playbook-pi5.yml Executable file

@@ -0,0 +1,143 @@
---
#
# Configure the pi5 from scratch
#
# Prerequisites:
# ssh configured
#
#
- name: Configure the pi5 from scratch
hosts: pi5
remote_user: griffix
roles:
- geerlingguy.docker
become: true
tasks:
# Add the docker group
- name: Create the docker group
ansible.builtin.group:
name: docker
state: present
become: true
- name: Ensure group "media" exists
ansible.builtin.group:
name: media
state: present
become: true
# Add the user griffix to the docker and media groups
- name: Add the user griffix to the docker and media groups
ansible.builtin.user:
name: griffix
groups: docker,media
become: true
# Mount the external 8 TB HDD
#UUID=8d9ea59e-4f9a-48d8-94bc-9604f7c131a4 /media/Seagate ext4 defaults,auto,users,rw,nofail,noatime 0 0
- name: Mount the Seagate HDD by UUID
ansible.posix.mount:
path: /media/Seagate
src: UUID=8d9ea59e-4f9a-48d8-94bc-9604f7c131a4
fstype: ext4
opts: defaults,auto,users,rw,nofail,noatime
state: present
become: true
# Mount the 500 GB SSD
#UUID=6177babe-d97c-4837-a326-1e557a5a3119 /temp ext4 defaults,auto,users,rw,nofail,noatime 0 0
- name: Mount the /temp SSD by UUID
ansible.posix.mount:
path: /temp
src: UUID=6177babe-d97c-4837-a326-1e557a5a3119
fstype: ext4
opts: defaults,auto,users,rw,nofail,noatime
state: present
become: true
# Reboot so the new fstab entries are taken into account
- name: Reboot machine and send a message
ansible.builtin.reboot:
msg: "Reboot du pi5 dans 5 secondes pour prise en compte des ajouts de disques en Fstab"
become: true
- name: Refresh the apt cache first
ansible.builtin.apt:
update_cache: yes
become: true
- name: Update all packages to their latest version
ansible.builtin.apt:
name: "*"
state: latest
become: true
- name: Install the required packages
ansible.builtin.apt:
pkg:
- fish
- btop
- vim
- nfs-kernel-server
become: true
# Convenience aliases for local logins
- name: Update root's .profile with the ll alias
lineinfile:
dest: ~/.profile
state: present
line: "alias ll='ls -alrth'"
become: true
- name: Update griffix's .profile with the ll alias
lineinfile:
dest: ~/.profile
state: present
line: "alias ll='ls -alrth'"
- name: Update griffix's .profile with the dc alias
lineinfile:
dest: ~/.profile
state: present
line: "alias dc='docker compose'"
- name: Configure the NFS export for /media/Seagate
lineinfile:
dest: /etc/exports
state: present
line: "/media/Seagate 192.168.1.0/24(rw,all_squash,insecure,async,no_subtree_check,anonuid=1000,anongid=1001)"
become: true
- name: Configure the NFS export for /temp
lineinfile:
dest: /etc/exports
state: present
line: "/temp 192.168.1.0/24(rw,all_squash,insecure,async,no_subtree_check,anonuid=1000,anongid=1001)"
become: true
- name: Reboot machine and send a message
ansible.builtin.reboot:
msg: "Reboot du pi5 dans 5 secondes pour prise en compte des installations et"
become: true
- synchronize:
src: /media/Seagate/backups/pi5/config/
dest: /config/
delegate_to: "{{ inventory_hostname }}"
become: true
- name: Recursively change ownership of a directory
ansible.builtin.file:
path: /config
state: directory
recurse: yes
owner: griffix
group: docker
- name: create and deploy docker compose services
community.docker.docker_compose_v2:
project_src: /config/
register: output
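
The play only appends the export lines to /etc/exports and relies on the reboot to activate them; a quick hedged check after the run, executed on the pi5:

# re-read /etc/exports without rebooting, then list what is actually exported
sudo exportfs -ra
showmount -e localhost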

41
docker/pi4/docker-compose.yaml Executable file

@@ -0,0 +1,41 @@
---
services:
watchtower:
image: containrrr/watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
Gitea:
image: docker.gitea.com/gitea:latest
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /data/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "8080:3000"
- "2221:22"
nginx:
image: nginx:latest
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
volumes:
- /config/nginx/nginx.conf:/etc/nginx/nginx.conf
- /config/nginx/certs:/etc/nginx/certs
networks:
gitea:
external: false
default:
external: true
name: nginx-proxy-net
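
The default network is declared external, so it must exist before the stack is started; a minimal sketch, with paths relative to the repository root:

# create the shared proxy network once, then bring the pi4 stack up
docker network create nginx-proxy-net
docker compose -f docker/pi4/docker-compose.yaml up -d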

0
docker/pi5/.env Normal file


@@ -0,0 +1,353 @@
---
services:
harborguard:
image: ghcr.io/harborguard/harborguard:latest
container_name: harborguard
environment:
- MAX_CONCURRENT_SCANS=1
- SCAN_TIMEOUT_MINUTES=15
- ENABLED_SCANNERS=trivy,grype
- LOG_LEVEL=error
- CLEANUP_OLD_SCANS_DAYS=7
ports:
- 3000:3000
volumes:
- /var/run/docker.sock:/var/run/docker.sock
foundryvtt:
image: felddy/foundryvtt:release
environment:
- FOUNDRY_USERNAME=${FOUNDRY_USERNAME}
- FOUNDRY_PASSWORD=${FOUNDRY_PASSWORD}
- UID=1000
- GID=1000
ports:
- "30000:30000/tcp"
volumes:
- /config/foundry-data:/data
env_file:
- path: .env
required: true
dashy:
image: lissy93/dashy
container_name: Dashy
volumes:
- /config/dashy/:/app/user-data/
ports:
- 8083:8080
# Set any environmental variables
environment:
- NODE_ENV=production
- UID=1000
- GID=1000
restart: unless-stopped
healthcheck:
test: ['CMD', 'node', '/app/services/healthcheck']
interval: 1m30s
timeout: 10s
retries: 3
start_period: 40s
pihole:
container_name: pihole
image: pihole/pihole:latest
ports:
# DNS Ports
- "53:53/tcp"
- "53:53/udp"
# Default HTTP Port
- "80:80/tcp"
# Default HTTPs Port. FTL will generate a self-signed certificate
- "443:443/tcp"
# Uncomment the below if using Pi-hole as your DHCP Server
#- "67:67/udp"
# Uncomment the line below if you are using Pi-hole as your NTP server
#- "123:123/udp"
environment:
# Set the appropriate timezone for your location from
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones, e.g:
TZ: 'Europe/Paris'
# Set a password to access the web interface. Not setting one will result in a random password being assigned
FTLCONF_webserver_api_password: 'correct horse battery staple'
# If using Docker's default `bridge` network setting the dns listening mode should be set to 'all'
FTLCONF_dns_listeningMode: 'all'
# Volumes store your data between container upgrades
volumes:
# For persisting Pi-hole's databases and common configuration file
- '/config/etc-pihole:/etc/pihole'
# Uncomment the below if you have custom dnsmasq config files that you want to persist. Not needed for most starting fresh with Pi-hole v6. If you're upgrading from v5 you and have used this directory before, you should keep it enabled for the first v6 container start to allow for a complete migration. It can be removed afterwards. Needs environment variable FTLCONF_misc_etc_dnsmasq_d: 'true'
#- './etc-dnsmasq.d:/etc/dnsmasq.d'
cap_add:
# See https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
# Required if you are using Pi-hole as your DHCP server, else not needed
- NET_ADMIN
# Required if you are using Pi-hole as your NTP client to be able to set the host's system time
- SYS_TIME
# Optional, if Pi-hole should get some more processing time
- SYS_NICE
restart: unless-stopped
portainer:
image: portainer/portainer-ce:latest
container_name: portainer
restart: unless-stopped
security_opt:
- no-new-privileges:true
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /config/portainer-data:/data
ports:
- 9000:9000
flaresolverr:
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=Europe/Paris
ports:
- "${PORT:-8191}:8191"
restart: unless-stopped
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/prowlarr:/config
ports:
- 9696:9696
restart: unless-stopped
sonarr:
image: lscr.io/linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/sonarr:/config
- /media/Seagate/Series:/media/Seagate/Series
- /media/Seagate/Animes:/media/Seagate/Animes
- /temp:/temp
ports:
- 8989:8989
restart: unless-stopped
radarr:
image: lscr.io/linuxserver/radarr:latest
container_name: radarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/radarr:/config
- /media/Seagate/Movies:/media/Seagate/Movies
- /media/Seagate/temp:/media/Seagate/temp
- /temp:/temp
ports:
- 7878:7878
restart: unless-stopped
gluetun:
image: qmcgaw/gluetun
container_name: gluetun
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun
env_file:
- path: .env
required: true
environment:
- VPN_SERVICE_PROVIDER=protonvpn
- VPN_TYPE=wireguard
- WIREGUARD_PRIVATE_KEY=${VPNKEY}
- SERVER_COUNTRIES=Sweden
- HTTPPROXY=on
- HTTPPROXY_LOG=on
ports:
- 8888:8888
- 6881:6881
- 8080:8080
qbit:
image: ghcr.io/linuxserver/qbittorrent
container_name: qbit
volumes:
- /temp:/temp
- /config/transmission-daemon:/config
environment:
- PUID=1000
- PGID=1000
network_mode: "service:gluetun"
restart: always
depends_on:
gluetun:
condition: service_healthy
restart: true
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
- JELLYFIN_PublishedServerUrl=192.168.1.55 #optional
volumes:
- /config/jellyfin:/config
- /media/Seagate/Series:/Series
- /media/Seagate/Animes:/Animes
- /media/Seagate/Movies:/Movies
ports:
- 8096:8096
- 8920:8920 #optional
- 7359:7359/udp #optional
- 1900:1900/udp #optional
restart: unless-stopped
watchtower:
image: containrrr/watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- path: .env
required: true
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- path: .env
required: true
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fec42f399876eb6faf9e008570597741c87ff7662a54185593e74b09ce83d177
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0
env_file:
- path: .env
required: true
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
restart: always
Forgejo:
image: codeberg.org/forgejo/forgejo:11
container_name: forgejo
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- forgejo
volumes:
- /config/forgejo:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- '3000:3000'
- '222:22'
nginx-proxy-manager:
image: jc21/nginx-proxy-manager:latest
container_name: nginx-proxy-manager
ports:
- "80:80"
- "443:443"
- "81:81"
volumes:
- /config/nginx/data:/data
- /config/nginx/letsencrypt:/etc/letsencrypt
restart: unless-stopped
freshrss:
image: lscr.io/linuxserver/freshrss:latest
container_name: freshrss
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /config/freshrss/:/config
ports:
- 8082:80
restart: unless-stopped
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_JWTSECRET: ${VIKUNJA_SERVICE_JWTSECRET}
VIKUNJA_SERVICE_PUBLICURL: https://notes.griffix.hopto.org/
VIKUNJA_DATABASE_PATH: /db/vikunja.db
env_file:
- path: .env
required: true
ports:
- 3456:3456
volumes:
- /config/vikunja/files:/app/vikunja/files
- /config/vikunja/db:/db
restart: unless-stopped
networks:
forgejo:
external: false
#default:
# external: true
volumes:
model-cache:
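
Several services above pull their secrets from the (empty) docker/pi5/.env committed alongside this file; a sketch of the variables this compose file actually references — every value below is a placeholder, not a real setting from the repository:

# Foundry VTT credentials
FOUNDRY_USERNAME=changeme
FOUNDRY_PASSWORD=changeme
# gluetun / ProtonVPN WireGuard key
VPNKEY=changeme
# Immich
IMMICH_VERSION=release
UPLOAD_LOCATION=/path/to/immich-library
DB_USERNAME=postgres
DB_PASSWORD=changeme
DB_DATABASE_NAME=immich
DB_DATA_LOCATION=/path/to/immich-postgres
# Vikunja JWT secret
VIKUNJA_SERVICE_JWTSECRET=changeme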

301
docker/pi5/docker-compose.yaml Executable file

@@ -0,0 +1,301 @@
---
services:
foundryvtt:
image: felddy/foundryvtt:release
environment:
- FOUNDRY_USERNAME=${FOUNDRY_USERNAME}
- FOUNDRY_PASSWORD=${FOUNDRY_PASSWORD}
- UID=1000
- GID=1000
ports:
- "30000:30000/tcp"
volumes:
- /config/foundry-data:/data
env_file:
- path: .env
required: true
portainer:
image: portainer/portainer-ce:latest
container_name: portainer
restart: unless-stopped
security_opt:
- no-new-privileges:true
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /config/portainer-data:/data
ports:
- 9000:9000
flaresolverr:
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=Europe/Paris
ports:
- "${PORT:-8191}:8191"
restart: unless-stopped
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/prowlarr:/config
ports:
- 9696:9696
restart: unless-stopped
sonarr:
image: lscr.io/linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/sonarr:/config
- /media/Seagate/Series:/media/Seagate/Series
- /media/Seagate/Animes:/media/Seagate/Animes
- /temp:/temp
ports:
- 8989:8989
restart: unless-stopped
radarr:
image: lscr.io/linuxserver/radarr:latest
container_name: radarr
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
volumes:
- /config/radarr:/config
- /media/Seagate/Movies:/media/Seagate/Movies
- /media/Seagate/temp:/media/Seagate/temp
- /temp:/temp
ports:
- 7878:7878
restart: unless-stopped
gluetun:
image: qmcgaw/gluetun
container_name: gluetun
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun
env_file:
- path: .env
required: true
environment:
- VPN_SERVICE_PROVIDER=protonvpn
- VPN_TYPE=wireguard
- WIREGUARD_PRIVATE_KEY=${VPNKEY}
- SERVER_COUNTRIES=Sweden
- HTTPPROXY=on
- HTTPPROXY_LOG=on
ports:
- 8888:8888
- 6881:6881
- 8080:8080
qbit:
image: ghcr.io/linuxserver/qbittorrent
container_name: qbit
volumes:
- /temp:/temp
- /config/transmission-daemon:/config
environment:
- PUID=1000
- PGID=1000
network_mode: "service:gluetun"
restart: always
depends_on:
gluetun:
condition: service_healthy
restart: true
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
- JELLYFIN_PublishedServerUrl=192.168.1.55 #optional
volumes:
- /config/jellyfin:/config
- /media/Seagate/Series:/Series
- /media/Seagate/Animes:/Animes
- /media/Seagate/Movies:/Movies
ports:
- 8096:8096
- 8920:8920 #optional
- 7359:7359/udp #optional
- 1900:1900/udp #optional
restart: unless-stopped
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- path: .env
required: true
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- path: .env
required: true
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fec42f399876eb6faf9e008570597741c87ff7662a54185593e74b09ce83d177
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0
env_file:
- path: .env
required: true
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
restart: always
Forgejo:
image: codeberg.org/forgejo/forgejo:11
container_name: forgejo
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- forgejo
volumes:
- /config/forgejo:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- '3000:3000'
- '222:22'
nginx-proxy-manager:
image: jc21/nginx-proxy-manager:latest
container_name: nginx-proxy-manager
ports:
- "80:80"
- "443:443"
- "81:81"
volumes:
- /config/nginx/data:/data
- /config/nginx/letsencrypt:/etc/letsencrypt
restart: unless-stopped
freshrss:
image: lscr.io/linuxserver/freshrss:latest
container_name: freshrss
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /config/freshrss/:/config
ports:
- 8082:80
restart: unless-stopped
vikunja:
image: vikunja/vikunja
container_name: vikunja
environment:
VIKUNJA_SERVICE_JWTSECRET: ${VIKUNJA_SERVICE_JWTSECRET}
VIKUNJA_SERVICE_PUBLICURL: https://notes.griffix.hopto.org/
VIKUNJA_DATABASE_PATH: /db/vikunja.db
env_file:
- path: .env
required: true
ports:
- 3456:3456
volumes:
- /config/vikunja/files:/app/vikunja/files
- /config/vikunja/db:/db
restart: unless-stopped
glass-keep:
image: nikunjsingh/glass-keep:latest
container_name: glass-keep
restart: unless-stopped
env_file:
- path: .env
required: true
environment:
NODE_ENV: production
API_PORT: "8080"
JWT_SECRET: ${VIKUNJA_SERVICE_JWTSECRET}
DB_FILE: /app/data/notes.db
ADMIN_EMAILS: griffix
ports:
- "8380:8080"
volumes:
- /config/glass-keep:/app/data
networks:
forgejo:
external: false
#default:
# external: true
volumes:
model-cache:

@@ -0,0 +1 @@
Subproject commit 013b790a0ca9ca4d54bf38e7c64ff4f9f6a2ab33

BIN
hs80/Corsairev2 Executable file

Binary file not shown.

BIN
hs80/corsair_working1.0 Executable file

Binary file not shown.

55
hs80/corsairev2.rs Normal file

@@ -0,0 +1,55 @@
use hidapi::{HidApi};
const CORSAIR_VID: u16 = 0x1B1C;
const HS80_PID: u16 = 0x0A6B;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("VID/PID : ");
println!("{}",CORSAIR_VID);
println!("{}",HS80_PID);
let api = HidApi::new()?;
let device = api.open(CORSAIR_VID, HS80_PID)?;
let mut buffer = [0u8; 64];
println!("Reading from device");
loop {
match device.read(&mut buffer) {
Ok(size) => {
/*for i in 0..size {
print!("{:02X} ", buffer[i]);
}*/
if size > 2 {
match buffer[3] {
0x0f => {
println!("Received battery change event");
let percentage = (buffer[5] as u16 | (buffer[6] as u16) << 8) as f64 / 10.0;
println!("Battery Percentage: {:.1}%", percentage);
}
0x10 => {
println!("Received charging state change event");
let charging = buffer[5] == 1;
println!("Charging: {}", charging);
}
0xA6 => {
println!("Received mic state change event");
let micro = buffer[5] == 0;
println!("Micro actif: {}", micro);
}
_ => {
println!("Unsupported event");
}
}
}
}
Err(e) => {
println!("Error reading from device: {}", e);
break;
}
}
};
Ok(())
}


@@ -0,0 +1,9 @@
import QtQuick
import org.kde.plasma.plasmoid
import org.kde.plasma.components as PlasmaComponents
PlasmoidItem{
PlasmaComponents.Label {
text: "Hello World!"
}
}


@@ -0,0 +1,20 @@
{
"KPlugin": {
"Authors": [
{
"Email": "vbriday@gmail.com",
"Name": "Victor Briday"
}
],
"Category": "System Information",
"Description": "hs80 status tray",
"Icon": "headset",
"Id": "com.example.hs80tray",
"Name": "hs80tray",
"Version": "1",
"Website": "https://example.com/user/plasmoid-helloworldplugin",
"BugReportUrl": "https://example.com/user/plasmoid-helloworldplugin/bugs"
},
"X-Plasma-API-Minimum-Version": "6.0",
"KPackageStructure": "Plasma/Applet"
}
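
A hedged sketch of installing the applet for a quick test, assuming the usual Plasma package layout (metadata.json at the package root with the QML above as contents/ui/main.qml — the directory layout itself is not visible in this diff, so the path below is an assumption):

# install the applet for the current user, then preview it
kpackagetool6 --type Plasma/Applet --install hs80/hs80tray
plasmoidviewer --applet com.example.hs80tray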

BIN
hs80/hw Executable file

Binary file not shown.

3
hs80/hw.rs Normal file

@@ -0,0 +1,3 @@
fn main() {
println!("Hello world!!");
}

81
hs80/main.rs Normal file

@@ -0,0 +1,81 @@
use hidapi::{HidApi};
use dbus::blocking::{Connection};
use dbus::channel::Sender;
use dbus::{Message, Path};
use dbus::strings::{Interface, Member};
const CORSAIR_VID: u16 = 0x1B1C;
const HS80_PID: u16 = 0x0A6B;
struct DBusSignalSender {
connection: Connection,
path: Path<'static>,
interface: Interface<'static>
}
impl DBusSignalSender {
fn new(path: &str, interface: &str) -> Self {
let connection = Connection::new_session().expect("Failed to connect to DBus");
Self {
connection,
path: Path::new(path).expect("Invalid Path"),
interface: Interface::new(interface).expect("Invalid Interface")
}
}
fn send_update(&self, update: i32) {
let msg = Message::signal(
&self.path,
&self.interface,
&Member::new("HS80").expect("Invalid Signal name"),
).append1(update);
self.connection.send(msg).expect("Failed to send Signal");
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize the sender with borrowed string literals
let sender = DBusSignalSender::new(
"/com/h0psej0ch/corsair", // Object path
"com.h0psej0ch.corsair.Interface", // Interface
);
// Initialize the HID interface with the VID and PID of the HS-80 Headset
let api = HidApi::new()?;
let device = api.open(CORSAIR_VID, HS80_PID)?;
let mut buffer = [0u8; 64];
// Indefinitely loop and read the device
loop {
match device.read(&mut buffer) {
Ok(size) => {
if size > 2 {
match buffer[3] {
0x0f => {
let percentage = (buffer[5] as u16 | (buffer[6] as u16) << 8) as f64 / 10.0;
sender.send_update(percentage as i32);
}
0x10 => {
let charging = buffer[5] == 1;
sender.send_update(if charging {-1} else {-2});
}
_ => {}
}
}
}
Err(e) => {
println!("Error reading from device: {}", e);
break;
}
}
};
Ok(())
}
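
A small sketch of building the daemon and watching the signal it emits, assuming a Cargo project that pulls in the hidapi and dbus crates (the Cargo manifest is not part of this diff):

# add the dependencies and run the daemon (needs permission to open the HID device)
cargo add hidapi dbus
cargo run
# in another terminal, watch the HS80 signal on the session bus
dbus-monitor "type='signal',interface='com.h0psej0ch.corsair.Interface',member='HS80'"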

52
hs80/main_working1.0.rs Normal file

@@ -0,0 +1,52 @@
use hidapi::{HidApi};
const CORSAIR_VID: u16 = 0x1B1C;
const HS80_PID: u16 = 0x0A6B;
//fn main() -> Result<(), Box<dyn std::error::Error>> {
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("VID/PID : ");
println!("{}",CORSAIR_VID);
println!("{}",HS80_PID);
let api = HidApi::new()?;
let device = api.open(CORSAIR_VID, HS80_PID)?;
let mut buffer = [0u8; 64];
println!("Reading from device");
loop {
match device.read(&mut buffer) {
Ok(size) => {
for i in 0..size {
print!("{:02X} ", buffer[i]);
}
if size > 2 {
match buffer[3] {
0x0f => {
println!("Received battery change event");
let percentage = (buffer[5] as u16 | (buffer[6] as u16) << 8) as f64 / 10.0;
println!("Battery Percentage: {:.1}%", percentage);
}
0x10 => {
println!("Received charging state change event");
let charging = buffer[5] == 1;
println!("Charging: {}", charging);
}
_ => {
println!("Unsupported event");
}
}
}
}
Err(e) => {
println!("Error reading from device: {}", e);
break;
}
}
};
Ok(())
}

9
infra_conf/backup.sh Executable file

@@ -0,0 +1,9 @@
#!/bin/sh
DestDir="/mnt/data/backups/arch-griffix/griffix/"
rsync -avuz --exclude ".local/share/Steam" --exclude "VMs" --exclude "Games" --exclude "Téléchargements" --exclude ".cache" /home/griffix/ $DestDir
rsync -avuz /mnt/pi5data/backups/pi5 /mnt/data/backups/
rsync -avuz /mnt/pi5data/backups/immich /mnt/data/backups/
rsync -avuz --exclude ".local/share/Steam" --exclude "VMs" --exclude "Games" --exclude "Téléchargements" --exclude ".cache" /home/griffix /mnt/pi5data/backups/
exit $?

20
infra_conf/backup_pi5.sh Executable file

@@ -0,0 +1,20 @@
#!/bin/sh
DestDir="/media/Seagate/backups/pi5/config/"
docker image prune -f
echo "docker compose stop"
docker compose -f /config/compose.yaml stop
echo "rsync /config"
rsync -avuz --exclude "/config/jellyfin/cache/" /config/ $DestDir
echo "rsync immich"
rsync -avuz /media/Seagate/immich /media/Seagate/backups/immich
echo "docker compose start"
docker compose -f /config/compose.yaml start
echo "done"
exit $?
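
A hedged example of scheduling this backup with cron on the pi5 — the script location and the schedule are illustrative, not taken from the repository:

# run the backup every Sunday at 03:00 and keep a log of the output
0 3 * * 0 /home/griffix/backup_pi5.sh >> /var/log/backup_pi5.log 2>&1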

BIN
infra_conf/backups.pptx Normal file

Binary file not shown.

115
infra_conf/protonsync.sh Normal file

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
set -euo pipefail
# -------------------------------------------------
# Rclone sync + Discord webhook (via ENV)
# -------------------------------------------------
# ---------- Check that the required variable is set ----------
: "${DISCORD_WEBHOOK:?La variable d'environnement DISCORD_WEBHOOK n'est pas définie.}"
# ---------- Configuration ----------
SRC="/media/Seagate/archives/"
DST="remote:/backups/"
LOG_DIR="/media/Seagate/backups/"
mkdir -p "$LOG_DIR"
DATE=$(date +"%Y%m%d_%H%M%S")
TXT_LOG="${LOG_DIR}sync_${DATE}.log"
TMP_OUT=$(mktemp)
# ---------- Rclone options ----------
RCLONE_OPTS=(
--transfers 1
--drive-chunk-size 32M
--protondrive-replace-existing-draft=true
--tpslimit 5
--timeout 30s
--low-level-retries 10
--stats 1s
--stats-one-line
)
# ---------- Run the sync ----------
START_EPOCH=$(date +%s)
START_HUMAN=$(date +"%Y-%m-%d %H:%M:%S")
echo "▶️ Démarrage : $START_HUMAN"
# with set -e, a plain RC=$? after a failing rclone would never run; capture the status inline instead
RC=0
rclone sync "$SRC" "$DST" "${RCLONE_OPTS[@]}" >"$TMP_OUT" 2>&1 || RC=$?
END_EPOCH=$(date +%s)
END_HUMAN=$(date +"%Y-%m-%d %H:%M:%S")
DURATION=$((END_EPOCH-START_EPOCH))
# ---------- Extract the transfer stats ----------
STATS_LINE=$(grep -i "Transferred:" "$TMP_OUT" | tail -n1 || true)
[[ -z "$STATS_LINE" ]] && STATS_LINE="Aucune statistique disponible."
# ---------- Build the report ----------
REPORT=$(cat <<EOF
**🗂️ Rapport de sauvegarde Rclone**
**Début** : $START_HUMAN
**Fin** : $END_HUMAN
**Durée** : ${DURATION}s
**État** : $(if [[ $RC -eq 0 ]]; then echo "✅ Succès"; else echo "❌ Erreur (code $RC)"; fi)
**Statistiques**
\`\`\`
$STATS_LINE
\`\`\`
EOF
)
# Save the text report
echo "$REPORT" >"$TXT_LOG"
# ---------- Send to Discord ----------
# Default value in case RC is somehow unset
RC=${RC:-1}
# Colour depends on the result (green = success, red = failure)
COLOR=$([ "$RC" -eq 0 ] && echo 3066993 || echo 15158332)
# Build the JSON payload safely with jq
payload=$(jq -n \
--arg username "Rclone-Backup" \
--arg description "$REPORT" \
--argjson color "$COLOR" \
'
{
username: $username,
allowed_mentions: { parse: [] },
embeds: [
{
title: "Rapport de sauvegarde",
description: $description,
color: $color
}
]
}
')
# Send with curl and capture the HTTP status code for logging
http_code=$(curl -s -o /dev/null -w "%{http_code}" \
-X POST -H "Content-Type: application/json" \
-d "$payload" "$DISCORD_WEBHOOK")
if [[ "$http_code" != "204" ]]; then
echo "$(date) Webhook Discord échoué (HTTP $http_code)" >> ${LOG_DIR}/discord_error.log
echo $DISCORD_WEBHOOK >> ${LOG_DIR}/discord_error.log
echo $payload >> ${LOG_DIR}/discord_error.log
fi
# ---------- Final output ----------
echo "$REPORT"
echo "📁 Log complet → $TXT_LOG"
# Cleanup
rm -f "$TMP_OUT"
echo "🧹 Suppression des logs de plus de 30jours…"
find "${LOG_DIR}" -type f -name "*.log" -mtime +30 -print -delete
exit $RC
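
The script aborts unless DISCORD_WEBHOOK is set and expects an rclone remote named "remote" pointing at Proton Drive; a sketch of a manual run, with the webhook URL as a placeholder:

# configure the Proton Drive remote once, interactively
rclone config
# export the webhook and launch the sync
export DISCORD_WEBHOOK="https://discord.com/api/webhooks/XXXX/XXXX"
./infra_conf/protonsync.sh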

47
infra_conf/tarbackup.sh Normal file

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# ------------------------------------------------------------
# Usage: ./tarbackup.sh <source_directory>
# ------------------------------------------------------------
# ---------- 1. Initial checks ----------
if [[ -z "$1" ]]; then
echo "Usage : $0 <répertoire_source>"
exit 1
fi
SRC_DIR="$1"
if [[ ! -d "$SRC_DIR" ]]; then
echo "Erreur : '$SRC_DIR' nest pas un répertoire valide."
exit 1
fi
# ---------- 2. Destination directory ----------
OUT_DIR="/media/Seagate/archives" # destination folder for the archives
mkdir -p "$OUT_DIR" # create it if it does not exist yet
# ---------- 3. Create the archives ----------
cd "$SRC_DIR" || exit 1
for dir in */ ; do
[[ -d "$dir" ]] || continue # ignorer les fichiers éventuels
name="${dir%/}" # nom du sousdossier sans le slash final
timestamp=$(date +"%Y%m%d_%H%M%S") # horodatage au format souhaité
archive_name="${name}_${timestamp}.tar.gz"
# Crée l'archive dans le répertoire de sortie
tar -czf "${OUT_DIR}/${archive_name}" "$name"
echo "✅ Archive créée : ${OUT_DIR}/${archive_name}"
done
# ---------- 4. Clean up archives older than 30 days ----------
# find <folder> -type f -name "*.tar.gz" -mtime +30 -delete
# -mtime +30 → file modified more than 30 days ago
# -delete → remove the matching files
echo "🧹 Suppression des archives de plus de 30jours…"
find "$OUT_DIR" -type f -name "*.tar.gz" -mtime +30 -print -delete
echo "✅ Nettoyage terminé."


@@ -0,0 +1,15 @@
services:
nginx:
image: nginx:latest
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
- ./certs:/etc/nginx/certs
networks:
default:
external: true
name: nginx-proxy-net


@@ -0,0 +1,94 @@
events {
worker_connections 4096; ## Default: 1024
}
http {
# radarr
server {
listen 80;
server_name griffix.hopto.org:7878;
location / {
proxy_pass http://192.168.1.55:7878;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
# server {
# listen 443 ssl;
# server_name www.first.com first.com;
# ssl_certificate /etc/nginx/certs/ssl_cert.crt;
# ssl_certificate_key /etc/nginx/certs/ssl_key.key;
# location / {
# proxy_pass http://first_container:first_port;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# }
# }
# }
# sonarr
server {
listen 80;
server_name griffix.hopto.org:8989;
location / {
proxy_pass http://192.168.1.55:8989;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
# server {
# listen 443 ssl;
# server_name www.second.com second.com;
# ssl_certificate /etc/nginx/certs/ssl_cert.crt;
# ssl_certificate_key /etc/nginx/certs/ssl_key.key;
# location / {
# proxy_pass http://second_container:second_port;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# }
# }
# }
# qbit
server {
listen 80;
server_name griffix.hopto.org:8081;
location / {
proxy_pass http://192.168.1.55:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
# server {
# listen 443 ssl;
# server_name www.second.com second.com;
# ssl_certificate /etc/nginx/certs/ssl_cert.crt;
# ssl_certificate_key /etc/nginx/certs/ssl_key.key;
# location / {
# proxy_pass http://second_container:second_port;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# }
# }
# Jellyfin
server {
listen 80;
server_name griffix.hopto.org:8096;
location / {
proxy_pass http://192.168.1.55:8096;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
# server {
# listen 443 ssl;
# server_name www.second.com second.com;
# ssl_certificate /etc/nginx/certs/ssl_cert.crt;
# ssl_certificate_key /etc/nginx/certs/ssl_key.key;
# location / {
# proxy_pass http://second_container:second_port;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# }
# }
}
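
One caveat worth flagging: nginx does not use the port part of a server_name when matching requests, so the four server blocks above all effectively compete for listen 80. Before relying on this file it can at least be syntax-checked in a throwaway container (image tag assumed):

# validate the configuration without touching the running proxy
docker run --rm -v "$(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro" nginx:latest nginx -t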