Add ansible files
ansible/01_install.yaml (new file, 167 lines)
@@ -0,0 +1,167 @@
- name: Install kubernetes
  become: true
  hosts: incus-k8s-nodes
  tasks:
    - name: Disable SELinux
      ansible.posix.selinux:
        state: disabled

    - name: Install nfs-utils
      ansible.builtin.dnf:
        name: nfs-utils
        state: present
        update_cache: true

    - name: Check if firewalld is installed
      ansible.builtin.command:
        cmd: rpm -q firewalld
      failed_when: false
      changed_when: false
      register: firewalld_check

    - name: Disable firewall
      ansible.builtin.systemd_service:
        name: firewalld
        state: stopped
        enabled: false
        masked: true
      when: firewalld_check.rc == 0

    - name: Install iptables and iproute-tc
      ansible.builtin.dnf:
        name: "{{ item }}"
        state: present
        update_cache: true
      loop:
        - iptables
        - iproute-tc

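    # Kernel prerequisites for kubeadm: br_netfilter plus the bridge-nf
    # sysctls make bridged pod traffic visible to iptables, and ip_forward
    # enables routing between pods across nodes.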
    - name: Configure network
      block:
        - name: Configure kernel modules
          ansible.builtin.copy:
            src: files/etc_modules-load.d_k8s.conf
            dest: /etc/modules-load.d/k8s.conf
            owner: root
            group: root
            mode: "0644"

        - name: Enable overlay and br_netfilter modules
          community.general.modprobe:
            name: "{{ item }}"
            state: present
          loop:
            - overlay
            - br_netfilter

        - name: Configure sysctl
          ansible.posix.sysctl:
            name: "{{ item.key }}"
            value: "{{ item.value }}"
            state: present
            reload: true
          loop:
            - { key: net.bridge.bridge-nf-call-iptables, value: 1 }
            - { key: net.bridge.bridge-nf-call-ip6tables, value: 1 }
            - { key: net.ipv4.ip_forward, value: 1 }

    - name: Install kubernetes packages
      ansible.builtin.dnf:
        name: "{{ item }}"
        state: present
      loop:
        - cri-o1.34
        - kubernetes1.34
        - kubernetes1.34-kubeadm
        - kubernetes1.34-client

    - name: Start and enable cri-o
      ansible.builtin.systemd_service:
        name: crio
        state: started
        enabled: true

    - name: Start and enable kubelet
      ansible.builtin.systemd_service:
        name: kubelet
        state: started
        enabled: true

    - name: Check if kubeadm_init_result.txt exists on kube-main
      when: inventory_hostname == "kube-main"
      ansible.builtin.stat:
        path: /root/kubeadm_init_result.txt
      register: kubeadm_init_file_check
      failed_when: false

    # --token=xn6uig.fkf8debm23p79wwv

    - name: Run init command
      when: inventory_hostname == "kube-main" and not kubeadm_init_file_check.stat.exists
      ansible.builtin.shell:
        cmd: "kubeadm init --pod-network-cidr=10.244.0.0/16 --cri-socket=unix:///var/run/crio/crio.sock > /root/kubeadm_init_result.txt"
      register: kubeadm_init_result
      changed_when: kubeadm_init_result.rc == 0
      failed_when: kubeadm_init_result.rc != 0

    - name: AFTER INIT -- Check if kubeadm_init_result.txt exists on kube-main
      when: inventory_hostname == "kube-main"
      ansible.builtin.stat:
        path: /root/kubeadm_init_result.txt
      register: kubeadm_init_file_check

    - name: Read init result file content
      when: inventory_hostname == "kube-main" and kubeadm_init_file_check.stat.exists
      ansible.builtin.command:
        cmd: cat /root/kubeadm_init_result.txt
      register: kubeadm_init_file_content

    - name: Share kubeadm_init_file_content with the other hosts
      ansible.builtin.set_fact:
        kubeadm_init_file_content: "{{ kubeadm_init_file_content }}"
      run_once: true
      delegate_to: localhost

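    # kubeadm prints the worker join command as the last two lines of its
    # output (the second line is a backslash-continued discovery-token hash),
    # so concatenating them and stripping backslashes, tabs and quotes yields
    # a single-line `kubeadm join ...` command.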
    - name: Set join command from file content
      ansible.builtin.set_fact:
        join_command: >-
          {{
            (kubeadm_init_file_content.stdout_lines[-2] +
             kubeadm_init_file_content.stdout_lines[-1])
            | to_json
            | replace("\\", '')
            | replace("\t", '')
            | replace('"', '')
          }}

    - name: Display join command on worker nodes
      when: inventory_hostname in ["kube-worker1", "kube-worker2"]
      ansible.builtin.debug:
        var: join_command

    - name: Check if kubeadm join was already run
      when: inventory_hostname in ["kube-worker1", "kube-worker2"]
      ansible.builtin.stat:
        path: /var/log/kubeadm_join.log
      register: kubeadm_join_file_check

    - name: Join worker nodes to the cluster
      when: inventory_hostname in ["kube-worker1", "kube-worker2"] and not kubeadm_join_file_check.stat.exists
      ansible.builtin.shell:
        cmd: "{{ join_command }} >> /var/log/kubeadm_join.log"
      register: kubeadm_join_result
      changed_when: kubeadm_join_result.rc == 0
      failed_when: kubeadm_join_result.rc != 0

    - name: Create .kube directory on localhost
      delegate_to: localhost
      become: false
      run_once: true
      ansible.builtin.file:
        path: ~/.kube
        state: directory
        mode: "0755"

    - name: Fetch admin.conf from kube-main
      when: inventory_hostname == "kube-main"
      ansible.builtin.fetch:
        src: /etc/kubernetes/admin.conf
        dest: ~/.kube/config
        flat: true
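
With the inventory in ansible/inventory.ini, the playbooks are meant to be run in numbered order from the ansible/ directory; a typical invocation might look like this (the vault-password handling is an assumption, since the repo ships encrypted secrets):

    $ ansible-playbook -i inventory.ini 01_install.yaml
    $ ansible-playbook -i inventory.ini 02_post_install.yaml
    $ ansible-playbook -i inventory.ini 03_setup_traefik.yaml --ask-vault-pass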

ansible/02_post_install.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
- name: Post install
  hosts: localhost
  vars_files:
    - config/config_vars.yaml
  tasks:
    - name: Apply network overlay
      delegate_to: localhost
      kubernetes.core.k8s:
        state: present
        src: https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml

    - name: Add CSI driver helm repo
      delegate_to: localhost
      kubernetes.core.helm_repository:
        name: nfs-subdir-external-provisioner
        repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/

    - name: Install CSI driver
      delegate_to: localhost
      kubernetes.core.helm:
        name: nfs-subdir-external-provisioner
        chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
        update_repo_cache: true
        create_namespace: false
        release_namespace: kube-system
        values:
          storageClass:
            name: nfs-csi
            defaultClass: true
          nfs:
            server: "{{ nfs.server }}"
            path: "{{ nfs.path }}"
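
A quick post-install sanity check might be (illustrative kubectl commands, not part of the playbooks):

    $ kubectl get nodes            # all nodes Ready once flannel is up
    $ kubectl get storageclass     # nfs-csi marked (default)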

ansible/03_setup_traefik.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
- name: Setup Traefik
  vars_files:
    - secrets/traefik_secrets.yaml
  hosts:
    - localhost
  tasks:
    - name: Create Traefik namespace
      delegate_to: localhost
      kubernetes.core.k8s:
        name: traefik
        api_version: v1
        kind: Namespace
        state: present

    - name: Add Traefik chart repo
      delegate_to: localhost
      kubernetes.core.helm_repository:
        name: traefik
        repo_url: "https://traefik.github.io/charts"

    - name: Setup Traefik secret for OVH DNS credentials
      delegate_to: localhost
      kubernetes.core.k8s:
        template: files/traefik_ovh_secret.template.yaml
        state: present

    # - name: Create PVC for Traefik data
    #   delegate_to: localhost
    #   kubernetes.core.k8s:
    #     state: present
    #     definition:
    #       apiVersion: v1
    #       kind: PersistentVolumeClaim
    #       metadata:
    #         name: traefik-data
    #         namespace: traefik
    #       spec:
    #         accessModes:
    #           - ReadWriteOnce
    #         resources:
    #           requests:
    #             storage: 1G

    - name: Setup Traefik
      delegate_to: localhost
      kubernetes.core.helm:
        name: traefik
        chart_ref: traefik/traefik
        update_repo_cache: true
        create_namespace: true
        release_namespace: traefik
        values: "{{ lookup('template', 'files/traefik_values.template.yaml') | from_yaml }}"

ansible/04_setup_gitea.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
- name: Setup Gitea
  vars_files:
    - secrets/git_secrets.yaml
    - config/config_vars.yaml
  hosts:
    - localhost
  tasks:
    - name: Add Gitea chart repo
      kubernetes.core.helm_repository:
        name: gitea-charts
        repo_url: "https://dl.gitea.com/charts/"

    - name: Setup Gitea
      kubernetes.core.helm:
        name: gitea
        chart_ref: gitea-charts/gitea
        update_repo_cache: true
        create_namespace: true
        release_namespace: gitea
        values: "{{ lookup('template', 'files/gitea_values.template.yaml') | from_yaml }}"

    - name: Setup gitea service
      kubernetes.core.k8s:
        template: files/gitea_svc.template.yaml
        state: present

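    # Runner registration flow: ask the Gitea admin API for a fresh
    # registration token, store it in a Kubernetes Secret, and let the
    # act_runner pod read it via GITEA_RUNNER_REGISTRATION_TOKEN
    # (see files/gitea_runner.template.yaml).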
    - name: Get Gitea runner registration token
      ansible.builtin.uri:
        url: "https://{{ git.domain }}/api/v1/admin/actions/runners/registration-token"
        method: POST
        headers:
          Content-Type: "application/json"
          Authorization: "Basic {{ (git_admin_username + ':' + git_admin_password) | b64encode }}"
        body_format: json
        body:
          name: "admin-token"
          scopes: ["all"]
      register: gitea_token_response
      no_log: true

    - name: Create Gitea runner registration token secret
      kubernetes.core.k8s:
        state: present
        namespace: gitea
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: gitea-runner-registration-token
          type: Opaque
          data:
            token: "{{ gitea_token_response.json.token | b64encode }}"

    - name: Setup gitea runner
      kubernetes.core.k8s:
        template: files/gitea_runner.template.yaml
        state: present

ansible/04_setup_gitlab.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
- name: Setup GitLab
  vars_files:
    - secrets/gitlab_secrets.yaml
    - config/config_vars.yaml
  hosts:
    - localhost
  tasks:
    - name: Add GitLab chart repo
      kubernetes.core.helm_repository:
        name: gitlab
        repo_url: "https://charts.gitlab.io/"

    - name: Setup GitLab
      kubernetes.core.helm:
        name: gitlab
        chart_ref: gitlab/gitlab
        update_repo_cache: true
        create_namespace: true
        release_namespace: gitlab
        values: "{{ lookup('template', 'files/gitlab_values.template.yaml') | from_yaml }}"

    # - name: Setup GitLab service
    #   kubernetes.core.k8s:
    #     template: files/gitea_svc.template.yaml
    #     state: present

    # - name: Get Gitea runner registration token
    #   ansible.builtin.uri:
    #     url: "https://{{ gitea.domain }}/api/v1/admin/actions/runners/registration-token"
    #     method: POST
    #     headers:
    #       Content-Type: "application/json"
    #       Authorization: "Basic {{ (gitea_admin_username + ':' + gitea_admin_password) | b64encode }}"
    #     body_format: json
    #     body:
    #       name: "admin-token"
    #       scopes: ["all"]
    #   register: gitea_token_response
    #   no_log: true

    # - name: Create Gitea runner registration token secret
    #   kubernetes.core.k8s:
    #     state: present
    #     namespace: gitea
    #     definition:
    #       apiVersion: v1
    #       kind: Secret
    #       metadata:
    #         name: gitea-runner-registration-token
    #       type: Opaque
    #       data:
    #         token: "{{ gitea_token_response.json.token | b64encode }}"

    # # - name: Setup gitea runner
    # #   kubernetes.core.k8s:
    # #     template: files/gitea_runner.template.yaml
    # #     state: present

ansible/04_setup_monitoring.yaml (new file, 60 lines)
@@ -0,0 +1,60 @@
- name: Setup Monitoring
  vars_files:
    - config/config_vars.yaml
  hosts:
    - localhost
  tasks:
    - name: Add Prometheus chart repo
      kubernetes.core.helm_repository:
        name: prometheus
        repo_url: "https://prometheus-community.github.io/helm-charts"

    - name: Setup Prometheus
      kubernetes.core.helm:
        name: prometheus
        chart_ref: prometheus/kube-prometheus-stack
        update_repo_cache: true
        create_namespace: true
        release_namespace: monitoring

    # - name: Setup OneDev service
    #   kubernetes.core.k8s:
    #     template: files/onedev_svc.template.yaml
    #     state: present

    - name: Create IngressRoute for Grafana
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: traefik.io/v1alpha1
          kind: IngressRoute
          metadata:
            name: grafana-ingress
            namespace: monitoring
          spec:
            entryPoints:
              - websecure
            routes:
              - match: Host(`monitor.vanespen.dev`)
                kind: Rule
                services:
                  - name: prometheus-grafana
                    port: 80
            tls:
              certResolver: letsencrypt_dns

    - name: Add Grafana chart repo
      kubernetes.core.helm_repository:
        name: grafana
        repo_url: "https://grafana.github.io/helm-charts"

    - name: Install Loki
      kubernetes.core.helm:
        name: loki
        chart_ref: grafana/loki
        update_repo_cache: true
        create_namespace: true
        release_namespace: monitoring
        chart_version: 5.35.0
        values_files:
          - files/loki_values.yaml

ansible/04_setup_onedev.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
- name: Setup OneDev
  vars_files:
    - secrets/git_secrets.yaml
    - config/config_vars.yaml
  hosts:
    - localhost
  tasks:
    - name: Add OneDev chart repo
      kubernetes.core.helm_repository:
        name: onedev
        repo_url: "https://code.onedev.io/onedev/~helm"

    - name: Setup OneDev
      kubernetes.core.helm:
        name: onedev
        chart_ref: onedev/onedev
        update_repo_cache: true
        create_namespace: true
        release_namespace: onedev
        values:
          onedev:
            separateSSH:
              enabled: true
            persistence:
              storageClassName: "nfs-csi"
              size: 20Gi
            initSettings:
              user: "{{ git_admin_username }}"
              password: "{{ git_admin_password }}"
              email: "{{ git_admin_email }}"
              serverUrl: "{{ git.domain }}"

    - name: Setup OneDev service
      kubernetes.core.k8s:
        template: files/onedev_svc.template.yaml
        state: present

ansible/05_setup_argocd.yaml (new file, 87 lines)
@@ -0,0 +1,87 @@
- name: Setup ArgoCD
  hosts:
    - localhost
  vars_files:
    - config/config_vars.yaml
  tasks:
    - name: Check if argocd command is available
      ansible.builtin.command:
        cmd: "which argocd"
      register: argocd_check
      ignore_errors: true
      changed_when: false

    - name: Fail if argocd command is not available
      when: argocd_check.rc != 0
      ansible.builtin.fail:
        msg: "argocd command not found. Please install the argocd CLI."

    - name: Create ArgoCD namespace
      kubernetes.core.k8s:
        name: argocd
        api_version: v1
        kind: Namespace
        state: present

    - name: Install ArgoCD
      kubernetes.core.k8s:
        src: https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
        state: present
        namespace: argocd

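    # argocd-server normally serves TLS itself; server.insecure switches it
    # to plain HTTP so TLS can terminate at the Traefik IngressRoute instead
    # (files/argocd_svc.template.yaml).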
    - name: Patch ArgoCD ConfigMap
      kubernetes.core.k8s:
        kind: ConfigMap
        name: argocd-cmd-params-cm
        namespace: argocd
        merge_type: merge
        definition:
          data:
            server.insecure: "true"

    - name: Get ArgoCD server pod start time
      kubernetes.core.k8s_info:
        kind: Pod
        namespace: argocd
        label_selectors:
          - app.kubernetes.io/name = argocd-server
      register: argocd_pods

    - name: Calculate time since ArgoCD server started
      ansible.builtin.set_fact:
        argocd_uptime: "{{ ((ansible_date_time.iso8601.replace('T', ' ').replace('Z', '') | to_datetime) - (argocd_pods.resources[0].status.startTime.replace('T', ' ').replace('Z', '') | to_datetime)).total_seconds() }}"
      when: argocd_pods.resources | length > 0

    - name: Display ArgoCD server uptime
      ansible.builtin.debug:
        msg: "ArgoCD server has been running for {{ argocd_uptime }} seconds"
      when: argocd_uptime is defined

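    # Bumping the restartedAt pod-template annotation triggers a rolling
    # restart (the same mechanism `kubectl rollout restart` uses), so the
    # server picks up the patched ConfigMap.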
    - name: Restart ArgoCD server pods (only if running for less than 2 minutes)
      when: argocd_uptime is defined and argocd_uptime | float < 120
      kubernetes.core.k8s:
        kind: Deployment
        name: argocd-server
        namespace: argocd
        state: present
        definition:
          spec:
            template:
              metadata:
                annotations:
                  kubectl.kubernetes.io/restartedAt: "{{ ansible_date_time.iso8601 }}"

    - name: Configure ArgoCD Ingress
      kubernetes.core.k8s:
        template: files/argocd_svc.template.yaml
        state: present
        namespace: argocd

    - name: Get K8s context
      ansible.builtin.command:
        cmd: "kubectl config get-contexts -o name"
      register: k8s_context
      changed_when: false

    - name: Configure ArgoCD context
      ansible.builtin.command:
        cmd: "argocd cluster add {{ k8s_context.stdout_lines[0] }} --yes"

ansible/config/config_vars.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
nfs:
  server: 192.168.1.111
  path: /srv/nfs_k8s

argocd:
  domain: argocd.vanespen.dev

git:
  domain: git.vanespen.dev

ansible/files/argocd_svc.template.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-server
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - kind: Rule
      match: Host(`{{ argocd.domain }}`)
      priority: 10
      services:
        - name: argocd-server
          port: 80
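    # gRPC clients (the argocd CLI) are matched on Content-Type and routed to
    # the same service over h2c, taking precedence via the higher priority.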
    - kind: Rule
      match: Host(`{{ argocd.domain }}`) && Header(`Content-Type`, `application/grpc`)
      priority: 11
      services:
        - name: argocd-server
          port: 80
          scheme: h2c
  tls:
    certResolver: letsencrypt_dns

ansible/files/etc_modules-load.d_k8s.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
overlay
br_netfilter

ansible/files/gitea_runner.template.yaml (new file, 122 lines)
@@ -0,0 +1,122 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-act-runner
  namespace: gitea
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: gitea-act-runner-config
  namespace: gitea
data:
  config.yaml: |
    log:
      level: info
    cache:
      enabled: false
    container:
      valid_volumes:
        - /certs
      options: |
        --add-host=docker:host-gateway -v /certs:/certs
        -e "DOCKER_HOST=tcp://docker:2376/"
        -e "DOCKER_TLS_VERIFY=1"
        -e "DOCKER_CERT_PATH=/certs/client"

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: gitea-docker-daemon-config
  namespace: gitea
data:
  daemon.json: |
    { "insecure-registries": ["gitea-http.gitea.svc.cluster.local:3000"] }

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: gitea-act-runner-dind
  namespace: gitea
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea-act-runner-dind
  serviceName: gitea-act-runner-dind
  template:
    metadata:
      labels:
        app: gitea-act-runner-dind
    spec:
      containers:
        - name: runner
          image: docker.io/gitea/act_runner:nightly
          env:
            - name: DOCKER_HOST
              value: "tcp://127.0.0.1:2376"
            - name: DOCKER_CERT_PATH
              value: /certs/client
            - name: DOCKER_TLS_VERIFY
              value: "1"
            - name: ZOMBIE_TASK_TIMEOUT
              value: "30m"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: gitea-runner-registration-token
                  key: token
            - name: GITEA_INSTANCE_URL
              value: http://gitea-http.gitea.svc.cluster.local:3000
            - name: CONFIG_FILE
              value: /actrunner/config.yaml
          volumeMounts:
            - name: gitea-act-runner-data
              mountPath: /data
            - name: docker-certs
              mountPath: /certs/client
            - name: gitea-act-runner-config
              mountPath: /actrunner

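        # Docker-in-Docker sidecar: the dind daemon writes its client TLS
        # certificates into the shared docker-certs emptyDir, which the
        # runner container reads via DOCKER_CERT_PATH.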
        - name: daemon
          image: docker:27.1.2-dind
          env:
            - name: DOCKER_TLS_CERTDIR
              value: /certs
            - name: DOCKER_HOST
              value: tcp://127.0.0.1:2376
            - name: DOCKER_TLS_VERIFY
              value: "1"
          securityContext:
            privileged: true
          volumeMounts:
            - name: docker-certs
              mountPath: /certs/client
            - name: gitea-docker-daemon-config
              mountPath: /etc/docker

      volumes:
        - name: docker-certs
          emptyDir: {}
        - name: gitea-act-runner-config
          configMap:
            name: gitea-act-runner-config
        - name: gitea-act-runner-data
          persistentVolumeClaim:
            claimName: gitea-act-runner
        - name: gitea-docker-daemon-config
          configMap:
            name: gitea-docker-daemon-config

ansible/files/gitea_runner.template.yaml.WORKING (new file, 100 lines)
@@ -0,0 +1,100 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: act-runner-data
  namespace: gitea
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: nfs-csi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: act-runner-certs
  namespace: gitea
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-csi

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: act-runner
  name: act-runner
  namespace: gitea
spec:
  replicas: 1
  selector:
    matchLabels:
      app: act-runner
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: act-runner
    spec:
      restartPolicy: Always
      volumes:
        - name: docker-certs
          emptyDir: {}
        - name: runner-data
          # emptyDir: {}
          persistentVolumeClaim:
            claimName: act-runner-data
      containers:
        - name: runner
          image: docker.io/gitea/act_runner:latest-dind
          # command: ["sh", "-c", "while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...'; sleep 5; done; /sbin/tini -- /opt/act/run.sh"]
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          env:
            - name: DOCKER_HOST
              value: tcp://127.0.0.1:2376
              # value: unix:///var/run/docker.sock
            - name: DOCKER_CERT_PATH
              value: /certs/client
            - name: DOCKER_TLS_VERIFY
              value: "0"
            - name: GITEA_INSTANCE_URL
              value: http://gitea-http.gitea.svc.cluster.local:3000
            # - name: GITEA_RUNNER_LABELS
            #   value: "ubuntu-latest:docker://docker.io/gitea/runner-images:ubuntu-latest"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: gitea-runner-registration-token
                  key: token
          volumeMounts:
            - name: docker-certs
              mountPath: /certs
            - name: runner-data
              mountPath: /data
        # - name: daemon
        #   image: docker:29.0.2-dind
        #   env:
        #     - name: DOCKER_TLS_CERTDIR
        #       value: /certs
        #     - name: DOCKER_HOST
        #       value: tcp://127.0.0.1:2376
        #     - name: DOCKER_TLS_VERIFY
        #       value: "0"
        #   securityContext:
        #     privileged: true
        #   volumeMounts:
        #     - name: docker-certs
        #       mountPath: /certs

ansible/files/gitea_runner_config.yaml (new file, 110 lines)
@@ -0,0 +1,110 @@
# Example configuration file; it's safe to copy this as the default config file without any modification.

# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.

log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info

runner:
  # Where to store the registration result.
  file: .runner
  # How many tasks to execute concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if its timeout is shorter than this.
  timeout: 3h
  # The timeout for the runner to wait for running jobs to finish when shutting down.
  # Any running jobs that haven't finished after this timeout will be cancelled.
  shutdown_timeout: 0s
  # Whether to skip verifying the TLS certificate of the Gitea instance.
  insecure: false
  # The timeout for fetching the job from the Gitea instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Gitea instance.
  fetch_interval: 2s
  # The github_mirror of a runner is used to specify the mirror address of the github that pulls the action repository.
  # It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
  # and github_mirror is not empty. In this case,
  # it replaces https://github.com with the value here, which is useful for some special network environments.
  github_mirror: ""
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
  labels:
    - "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
    - "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address to listen on, but the address to connect to from job containers.
  # So 0.0.0.0 is a bad choice; leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when enable is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: ""
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # Other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
  # If the path starts with '/', the '/' will be trimmed.
  # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present
  force_pull: true
  # Rebuild docker image(s) even if already present
  force_rebuild: false
  # Always require a reachable docker daemon, even if not required by act_runner
  require_docker: false
  # Timeout to wait for the docker daemon to be reachable, if docker is required by require_docker or act_runner
  docker_timeout: 0s

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:

ansible/files/gitea_svc.template.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: gitea-http
  namespace: gitea
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`{{ git.domain }}`)
      kind: Rule
      services:
        - name: gitea-http
          port: 3000
  tls:
    certResolver: letsencrypt_dns

---
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: gitea-ssh
  namespace: gitea
spec:
  entryPoints:
    - ssh
  routes:
    - match: HostSNI(`*`)
      priority: 10
      services:
        - name: gitea-ssh
          port: 22
          weight: 10

ansible/files/gitea_values.template.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
valkey-cluster:
  enabled: false
valkey:
  enabled: true
postgresql:
  enabled: true
postgresql-ha:
  enabled: false

persistence:
  enabled: true
  storageClass: nfs-csi

gitea:
  config:
    database:
      DB_TYPE: postgres
    indexer:
      ISSUE_INDEXER_TYPE: bleve
      REPO_INDEXER_ENABLED: true
    server:
      DOMAIN: "{{ git.domain }}"
      ROOT_URL: "https://{{ git.domain }}"
  admin:
    username: "{{ git_admin_username }}"
    password: "{{ git_admin_password }}"
    email: "admin@{{ git.domain }}"

ansible/files/gitlab_values.template.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
nginx-ingress:
  enabled: false

global:
  edition: "ce"
  hosts:
    domain: "{{ gitlab.domain }}"
    https: false
  ingress:
    configureCertmanager: false
    tls:
      enabled: false

gitlab:
  webservice:
    ingress:
      tls:
        enabled: false

registry:
  ingress:
    tls:
      enabled: false

minio:
  image: docker.io/minio/minio
  ingress:
    tls:
      enabled: false

ansible/files/local-path-storage.yaml (new file, 128 lines)
@@ -0,0 +1,128 @@
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims", "configmaps"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["endpoints", "persistentvolumes", "pods"]
    verbs: ["*"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: docker.io/rancher/local-path-provisioner:v0.0.24
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      containers:
        - name: helper-pod
          image: docker.io/busybox
          imagePullPolicy: IfNotPresent

ansible/files/loki_values.yaml (new file, 54 lines)
@@ -0,0 +1,54 @@
loki:
  commonConfig:
    replication_factor: 1
  schemaConfig:
    configs:
      - from: "2024-04-01"
        store: tsdb
        object_store: s3
        schema: v13
        index:
          prefix: loki_index_
          period: 24h
  pattern_ingester:
    enabled: true
  limits_config:
    allow_structured_metadata: true
    volume_enabled: true
  ruler:
    enable_api: true

minio:
  enabled: true

deploymentMode: SingleBinary

singleBinary:
  replicas: 1

# Zero out replica counts of other deployment modes
backend:
  replicas: 0
read:
  replicas: 0
write:
  replicas: 0

ingester:
  replicas: 0
querier:
  replicas: 0
queryFrontend:
  replicas: 0
queryScheduler:
  replicas: 0
distributor:
  replicas: 0
compactor:
  replicas: 0
indexGateway:
  replicas: 0
bloomCompactor:
  replicas: 0
bloomGateway:
  replicas: 0

ansible/files/onedev_svc.template.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: onedev-http
  namespace: onedev
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`{{ git.domain }}`)
      kind: Rule
      services:
        - name: onedev
          port: 80
  tls:
    certResolver: letsencrypt_dns
---
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: onedev-ssh
  namespace: onedev
spec:
  entryPoints:
    - ssh
  routes:
    - match: HostSNI(`*`)
      priority: 10
      services:
        - name: onedev
          port: 22
          weight: 10

ansible/files/traefik_ovh_secret.template.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: ovh-api-credentials
  namespace: traefik
type: Opaque
data:
  OVH_ENDPOINT: "{{ ovh_creds.ovh_endpoint | b64encode }}"
  OVH_APPLICATION_KEY: "{{ ovh_creds.ovh_application_key | b64encode }}"
  OVH_APPLICATION_SECRET: "{{ ovh_creds.ovh_application_secret | b64encode }}"
  OVH_CONSUMER_KEY: "{{ ovh_creds.ovh_consumer_key | b64encode }}"

ansible/files/traefik_values.template.yaml (new file, 86 lines)
@@ -0,0 +1,86 @@
---
persistence:
  enabled: true
  size: 1G

ports:
  web:
    exposedPort: 80
    nodePort: 30080
  websecure:
    exposedPort: 443
    nodePort: 30443
    tls:
      enabled: true
  ssh:
    port: 2222
    expose:
      default: true
    exposedPort: 2222
    nodePort: 30022
    protocol: TCP

service:
  type: NodePort

ingressRoute:
  dashboard:
    enabled: true
    matchRule: Host(`traefik.kube-main.lab`)
    entryPoints:
      - web

providers:
  kubernetesCRD:
    allowExternalNameServices: true
  kubernetesGateway:
    enabled: true

gateway:
  listeners:
    web:
      namespacePolicy:
        from: All

certificatesResolvers:
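  # The staging resolver is kept for testing against Let's Encrypt's staging
  # CA; the IngressRoutes in this repo reference letsencrypt_dns for real
  # certificates.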
  letsencrypt_dns_stag:
    acme:
      email: "{{ email }}"
      caServer: https://acme-staging-v02.api.letsencrypt.org/directory
      storage: "/data/acme_dns_stag.json"
      dnsChallenge:
        provider: ovh
        delayBeforeCheck: 0
  letsencrypt_dns:
    acme:
      email: "{{ email }}"
      storage: "/data/acme_dns.json"
      dnsChallenge:
        provider: ovh
        delayBeforeCheck: 0

env:
  - name: OVH_ENDPOINT
    valueFrom:
      secretKeyRef:
        name: ovh-api-credentials
        key: OVH_ENDPOINT
  - name: OVH_APPLICATION_KEY
    valueFrom:
      secretKeyRef:
        name: ovh-api-credentials
        key: OVH_APPLICATION_KEY
  - name: OVH_APPLICATION_SECRET
    valueFrom:
      secretKeyRef:
        name: ovh-api-credentials
        key: OVH_APPLICATION_SECRET
  - name: OVH_CONSUMER_KEY
    valueFrom:
      secretKeyRef:
        name: ovh-api-credentials
        key: OVH_CONSUMER_KEY

logs:
  general:
    level: INFO

ansible/inventory.ini (new file, 7 lines)
@@ -0,0 +1,7 @@
[incus-k8s-nodes]
kube-main ansible_host=10.1.1.100 ansible_ssh_user=kubeadmin
kube-worker1 ansible_host=10.1.1.101 ansible_ssh_user=kubeadmin
kube-worker2 ansible_host=10.1.1.102 ansible_ssh_user=kubeadmin

[incus]
incus ansible_host=192.168.1.111 ansible_ssh_user=saucisson
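
Connectivity to these hosts can be verified before the first play, e.g.:

    $ ansible -i inventory.ini incus-k8s-nodes -m ping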

ansible/secrets/git_secrets.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
$ANSIBLE_VAULT;1.1;AES256
65633263613338306563343430313465393536613336616366363135663130636664396339623033
3539326336333531396533313834306161373262353463610a356562373863383335356131326464
31393032386130303234313438363665653038653761653432373031343931376264386139643934
6335313930663334310a636231643066663063393630386136333532323437666138623338373838
65366139626366633166316231393838616263353162613738353331333861656438633731643466
31626539666161373135393833666263303033316331666634326233343635633538303634336232
30363863633530656139393532663837396465316165666461386637666665303233343931383033
32653562303064656263373330663866373131613335323736616535313238363465333534663361
37616639663737663164343361636161623664356432633136663561326235633763

ansible/secrets/gitlab_secrets.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
$ANSIBLE_VAULT;1.1;AES256
34306334366634323836323362376431633262356232326334393935633937323235303766623839
3965613436356434313863393433366665333036303830650a633666656136663562336433616431
31653266666163323661623535343966326563656336666238653332313135623263353939663133
3033353730633932300a383864353161633362336632333864336131353833633039373662653837
39383036373531643964323363306331383239333464343532666139323834383866666365326665
62613331646330353432353236396335653832396633353135303130623966356666363539303731
64636136663133333565323337363962373331353139363264363734333163376330643963626261
30363735633336353232

ansible/secrets/traefik_secrets.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
$ANSIBLE_VAULT;1.1;AES256
61613536643364316536313162363563363063613733376533613962366639646431326338363831
3435633939646637333932613935396336343764353033340a303962393837343861643462643334
38653163653139383764656336373166306137373538663635636234306536666332363537383562
3238323166396336620a636263623931633361393366346433353738356139303036313263646534
62336633663035376537323034613336663963376639633961666335623230636365393263393165
33343333383834663964613334353332653061366135666462353636653338393865633866316630
63306665323437383066303365363433363533353934316135623561396535343361383430613461
66363961326465353433633937353062376334363936646163333135326333343262663531633032
64636463643462613539303034636336363863343462656461343834316537376234356366663162
36366666643263636632653661353333333236616137366238653330323038346665356661316638
62303636643765613266373237386463376362343938646437643630656533663438333632386137
30393964373161626263393863346466303436636530316430363861616635626165383436363864
33393465366133383661346435323466313538383866663739663835353461313532333362663733
62313531636663373964656662383031353462623064303161633838303932363438633366393438
32623330313263363066396166623631656164353932323937323062363635393239646330353938
39653133646430316464653264323638333664353635613166396261393832353564656266313566
61393266353639383932643662643765396532336234623862306462643638326635353763373036
3736373335346264323439643135313566363832313734363463