# SPDX-FileCopyrightText: 2024 Matthew Fennell
#
# SPDX-License-Identifier: AGPL-3.0-only
---
- name: Ensure XMPP server is set up
  gather_facts: false
  hosts: xmpp_server
  tasks:
    # Now, we create a non-root user with sudo privileges
    - name: Ensure wheel group exists
      remote_user: root
      ansible.builtin.group:
        name: wheel
        state: present

    - name: Ensure wheel group allows passwordless sudo
      remote_user: root
      ansible.builtin.lineinfile:
        dest: /etc/sudoers
        state: present
        regexp: "^%wheel"
        line: "%wheel ALL=(ALL) NOPASSWD: ALL"
        validate: visudo -cf %s

    - name: Ensure non-root admin account is created in wheel group
      remote_user: root
      ansible.builtin.user:
        name: admin
        groups: wheel
        append: true

    - name: Ensure admin ssh directory exists
      remote_user: root
      ansible.builtin.file:
        path: /home/admin/.ssh
        state: directory
        owner: admin
        group: admin
        mode: "0700"

    - name: Copy authorised keys to admin account
      remote_user: root
      ansible.builtin.copy:
        src: /root/.ssh/authorized_keys
        dest: /home/admin/.ssh/authorized_keys
        remote_src: true
        owner: admin
        group: admin
        mode: preserve

    - name: Ensure cloud-init is disabled
      ansible.builtin.copy:
        content: ""
        dest: /etc/cloud/cloud-init.disabled
        force: false
        owner: root
        group: root
        mode: "0644"
      become: true

    - name: Ensure hostname is set
      ansible.builtin.hostname:
        name: "{{ hostname }}"
      become: true

    - name: Ensure hostname is configured in /etc/hosts
      ansible.builtin.template:
        src: "{{ playbook_dir }}/files/hosts.j2"
        dest: /etc/hosts
        owner: root
        group: root
        mode: "0644"
      become: true

    # Extracts the SHA-256 digest of the certificate's public key, used below
    # to publish TLSA (DANE) records.
    # NOTE(review): `set -o pipefail` is a bash feature; ansible.builtin.shell
    # defaults to /bin/sh (dash on Debian) — consider `executable: /bin/bash`.
    - name: Retrieve DANE hash
      ansible.builtin.shell:
        cmd: >
          set -o pipefail &&
          openssl x509
          -in ~/.lego/certificates/{{ virtual_host }}.crt
          -noout -pubkey |
          openssl pkey -pubin -outform DER |
          openssl dgst -sha256 -binary |
          hexdump -ve '/1 "%02x"'
      register: dane_hash
      changed_when: false
      delegate_to: localhost

    # We allow status code 400 here as this is returned by deSEC if the domain
    # already exists. Ideally, we should filter out genuinely good/bad requests
    # here using the response.
    - name: Ensure domain exists in deSEC
      ansible.builtin.uri:
        url: https://desec.io/api/v1/domains/
        method: POST
        status_code: [201, 400]
        body_format: json
        headers:
          Authorization: Token {{ desec_token }}
        body:
          name: "{{ virtual_host }}"
      register: request
      delegate_to: localhost

    # The response feeds the domain_keys var (DS records) defined under vars.
    - name: Ensure domain is registered
      ansible.builtin.uri:
        url: https://desec.io/api/v1/domains/{{ virtual_host }}/
        method: GET
        headers:
          Authorization: Token {{ desec_token }}
      register: domain
      delegate_to: localhost

    # The response feeds the domain_with_ds var defined under vars.
    - name: Ensure domain to register DS record is registered
      ansible.builtin.uri:
        url: https://desec.io/api/v1/domains/?owns_qname={{ parent_host }}
        method: GET
        headers:
          Authorization: Token {{ desec_token }}
      register: parent_domain
      delegate_to: localhost

    - name: Ensure DS is registered in parent domain
      ansible.builtin.uri:
        url: "https://desec.io/api/v1/domains/{{ domain_with_ds }}/rrsets/"
        method: PUT
        body_format: json
        headers:
          Authorization: Token {{ desec_token }}
        body:
          - subname: "{{ ds_subname }}"
            type: DS
            ttl: 3600
            records: "{{ domain_keys }}"
      delegate_to: localhost
      when: domain_with_ds != ""

    - name: Ensure records are registered in subdomain
      ansible.builtin.uri:
        url: "https://desec.io/api/v1/domains/{{ virtual_host }}/rrsets/"
        method: PUT
        body_format: json
        headers:
          Authorization: Token {{ desec_token }}
        body:
          - subname: "conference"
            type: CNAME
            ttl: 3600
            records: ["{{ delegate_host }}."]
          - subname: "upload"
            type: CNAME
            ttl: 3600
            records: ["{{ delegate_host }}."]
          - subname: "_xmpp-client._tcp"
            type: SRV
            ttl: 3600
            records: ["0 5 5222 {{ delegate_host }}."]
          - subname: "_xmpps-client._tcp"
            type: SRV
            ttl: 3600
            records: ["0 5 5223 {{ delegate_host }}."]
          - subname: "_xmpp-server._tcp"
            type: SRV
            ttl: 3600
            records: ["0 5 5269 {{ delegate_host }}."]
          - subname: "_xmpps-server._tcp"
            type: SRV
            ttl: 3600
            records: ["0 5 5270 {{ delegate_host }}."]
          - subname: "_xmpps-server._tcp.upload"
            type: SRV
            ttl: 3600
            records: ["0 5 5270 {{ delegate_host }}."]
          - subname: "_xmpps-server._tcp.conference"
            type: SRV
            ttl: 3600
            records: ["0 5 5270 {{ delegate_host }}."]
          - subname: '{{ "_5222._tcp" + tlsa_appended_subdomain }}'
            type: TLSA
            ttl: 3600
            records: ["3 1 1 {{ dane_hash.stdout }}"]
          - subname: '{{ "_5223._tcp" + tlsa_appended_subdomain }}'
            type: TLSA
            ttl: 3600
            records: ["3 1 1 {{ dane_hash.stdout }}"]
          - subname: '{{ "_5269._tcp" + tlsa_appended_subdomain }}'
            type: TLSA
            ttl: 3600
            records: ["3 1 1 {{ dane_hash.stdout }}"]
          - subname: '{{ "_5270._tcp" + tlsa_appended_subdomain }}'
            type: TLSA
            ttl: 3600
            records: ["3 1 1 {{ dane_hash.stdout }}"]
      delegate_to: localhost

    # We specifically use apt instead of the more general package module here,
    # because we want to ensure the cache is updated before we try and install
    # anything. This is needed because, on a fresh Debian install on AWS
    # Lightsail (as of 2024-02-08), nothing was returned after running apt
    # search borgmatic. Updating the cache before running apt install solved
    # this issue, but the package module does not support this functionality.
    - name: Ensure required packages are installed
      ansible.builtin.apt:
        name:
          - coturn # Audio / video calling server
          - libjs-bootstrap4 # Used by invite webpage
          - libjs-jquery # Used by invite webpage
          - lua-dbi-postgresql # Prosody postgres connection
          - nginx # Serve invite webpages
          - postgresql # Database
          - prosody # XMPP server
          - prosody-modules # Extra addons
          - python3-pexpect # Used by ansible expect role
          - python3-psycopg2 # Used by ansible postgres role
          - systemd-timesyncd # Used to make sure the date is correct
          - ufw # Firewall
        state: present
        update_cache: true
      become: true

    - name: Ensure required ports with ufw applications are open
      community.general.ufw:
        rule: allow
        name: "{{ item }}"
        state: enabled
      loop:
        - OpenSSH
        - Turnserver
        - WWW Full
        - XMPP
      become: true

    - name: Ensure other required tcp ports are open
      community.general.ufw:
        rule: allow
        port: "{{ item }}"
        proto: tcp
        state: enabled
      loop:
        - 5000 # XEP-0065
        - 5223 # XEP-0368
        - 5270 # XEP-0368
        - 5280 # XEP-0363
        - 5281 # XEP-0363
        # - 5432 # Postgres
      become: true

    - name: Ensure other udp ports are open
      community.general.ufw:
        rule: allow
        port: "{{ item }}"
        proto: udp
        state: enabled
      loop:
        - 5000 # XEP-0065
        - 5280 # XEP-0363
        - 5281 # XEP-0363
      become: true

    - name: Ensure default nginx config is removed
      ansible.builtin.file:
        path: "/etc/nginx/sites-enabled/default"
        state: absent
      become: true
      notify: Restart nginx

    - name: Ensure nginx config is installed
      ansible.builtin.template:
        src: "{{ playbook_dir }}/files/nginx_conf.j2"
        dest: /etc/nginx/sites-available/{{ virtual_host }}
        owner: root
        group: root
        mode: "0644"
      become: true
      notify: Restart nginx

    - name: Ensure nginx config is enabled
      ansible.builtin.file:
        src: /etc/nginx/sites-available/{{ virtual_host }}
        dest: /etc/nginx/sites-enabled/{{ virtual_host }}
        owner: root
        group: root
        state: link
      become: true
      notify: Restart nginx

    - name: Ensure turn is configured
      ansible.builtin.template:
        src: "{{ playbook_dir }}/files/turnserver.conf.j2"
        dest: /etc/turnserver.conf
        owner: root
        group: prosody
        mode: "0640"
      become: true
      notify: Restart coturn

    - name: Ensure prosody database is set up
      community.postgresql.postgresql_db:
        name: prosody
      become: true
      become_user: postgres

    - name: Ensure prosody role is created
      community.postgresql.postgresql_user:
        login_db: prosody
        name: prosody
      become: true
      become_user: postgres

    - name: Ensure prosody schema is created
      community.postgresql.postgresql_schema:
        login_db: prosody
        name: prosody
        owner: prosody
      become: true
      become_user: postgres

    # NOTE(review): this looks redundant with "Ensure prosody role is created"
    # above (same module, same user) — confirm whether both are needed.
    - name: Ensure prosody user exists on database
      community.postgresql.postgresql_user:
        name: prosody
      become: true
      become_user: postgres

    - name: Ensure prosody user has permissions on database
      community.postgresql.postgresql_privs:
        type: database
        login_db: prosody
        privs: ALL
        roles: prosody
      become: true
      become_user: postgres

    - name: Ensure prosody user has permissions on schema
      community.postgresql.postgresql_privs:
        type: table
        login_db: prosody
        objs: ALL_IN_SCHEMA
        privs: ALL
        roles: prosody
      become: true
      become_user: postgres

    - name: Ensure top-level prosody configuration is installed
      ansible.builtin.template:
        src: "{{ playbook_dir }}/files/prosody.cfg.lua.j2"
        dest: /etc/prosody/prosody.cfg.lua
        owner: root
        group: prosody
        mode: "0640"
      become: true
      notify: Restart prosody

    - name: Ensure host-specific prosody configuration is available
      ansible.builtin.template:
        src: "{{ playbook_dir }}/files/virtual_host.cfg.lua.j2"
        dest: "/etc/prosody/conf.avail/{{ virtual_host }}.cfg.lua"
        owner: root
        group: prosody
        mode: "0644"
      become: true
      notify: Restart prosody

    - name: Ensure host-specific prosody configuration is set
      ansible.builtin.file:
        src: "/etc/prosody/conf.avail/{{ virtual_host }}.cfg.lua"
        dest: "/etc/prosody/conf.d/{{ virtual_host }}.cfg.lua"
        owner: root
        group: prosody
        state: link
      become: true
      notify: Restart prosody

    - name: Ensure localhost prosody configuration is removed
      ansible.builtin.file:
        path: "/etc/prosody/conf.d/localhost.cfg.lua"
        state: absent
      become: true
      notify: Restart prosody

    - name: Ensure localhost prosody configuration is not available
      ansible.builtin.file:
        path: "/etc/prosody/conf.avail/localhost.cfg.lua"
        state: absent
      become: true

    - name: Ensure example prosody configuration is not available
      ansible.builtin.file:
        path: "/etc/prosody/conf.avail/example.com.cfg.lua"
        state: absent
      become: true

    - name: Ensure prosody is enabled
      ansible.builtin.service:
        name: prosody
        enabled: true
      become: true

    # Vultr adds a custom sshd_config file that enabled password authentication.
    # I don't want this to be enabled, since I'm already copying the public key.
    - name: Ensure password authentication is not explicitly enabled
      ansible.builtin.file:
        path: "/etc/ssh/sshd_config.d/50-cloud-init.conf"
        state: absent
      become: true
      notify: Restart sshd

    - name: Ensure password based authentication is disabled
      ansible.builtin.copy:
        src: "{{ playbook_dir }}/files/50-disable-password-auth.conf"
        dest: "/etc/ssh/sshd_config.d/50-disable-password-auth.conf"
        owner: root
        group: root
        mode: "0644"
      become: true
      notify: Restart sshd

    - name: Ensure unattended upgrades config is installed
      ansible.builtin.copy:
        src: "{{ playbook_dir }}/files/50unattended-upgrades"
        dest: "/etc/apt/apt.conf.d/50unattended-upgrades"
        owner: root
        group: root
        mode: "0644"
      become: true
  handlers:
    - name: Restart prosody
      ansible.builtin.service:
        name: prosody
        state: restarted
      become: true

    - name: Restart coturn
      ansible.builtin.service:
        name: coturn
        state: restarted
      become: true

    - name: Restart sshd
      ansible.builtin.service:
        name: sshd
        state: restarted
      become: true

    - name: Restart nginx
      ansible.builtin.service:
        name: nginx
        state: restarted
      become: true
  vars:
    # DS-record strings for this zone, filtered to algorithm 13 / digest
    # type 2 entries from the deSEC "Ensure domain is registered" response.
    domain_keys: >-
      {{- domain.json["keys"]
      | map(attribute='ds')
      | flatten
      | select("search", " 13 2 ") -}}
    parent_host: "{{ virtual_host.split('.')[1:] | join('.') }}"
    # When the virtual host is on a subdomain, we want a DS record on the
    # parent domain. This is so we can manage the subdomain as a separate
    # domain on deSEC, and enforce some separation between nonprod and prod.
    #
    # If virtual_host is "continuous.nonprod.example.org", then domain_with_ds
    # is "example.org".
    domain_with_ds: >-
      {{- parent_domain.json
      | map(attribute='name')
      | first
      | default("") -}}
    # To register the DS record, we need the subname to point to.
    #
    # If virtual_host is "continuous.nonprod.example.org", then ds_subname is
    # "continuous.nonprod".
    # NOTE(review): the leading '.' in this regex is an unescaped wildcard
    # (matches any character, not just the literal dot) — confirm whether it
    # should be escaped along with the dots inside domain_with_ds.
    ds_subname: "{{ virtual_host | regex_replace('.' + domain_with_ds, '') }}"
    delegate_host: >-
      {{- virtual_host
      if delegate_prefix == ""
      else delegate_prefix + "." + virtual_host -}}
    tlsa_appended_subdomain: >-
      {{- "" if delegate_prefix == "" else "." + delegate_prefix -}}