diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 3532128a0..e05e2da88 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1 @@ -# These are supported funding model platforms - github: 'linuxfabrik' diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..761dd617a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,52 @@ +name: 'Bug report' +description: 'Submit a bug report for LFOps' +labels: + - 'bug' +body: + - type: 'checkboxes' + id: 'before-posting' + attributes: + label: 'Before submitting:' + options: + - label: 'I have searched existing issues to make sure this is not a duplicate.' + required: true + - label: 'I am using the latest release.' + required: true + - label: 'I agree to follow the [Code of Conduct](https://github.com/Linuxfabrik/lfops/blob/main/CODE_OF_CONDUCT.md).' + required: true + + - type: 'textarea' + id: 'bug-description' + attributes: + label: 'Bug description' + description: 'Provide a clear and concise description of the bug.' + validations: + required: true + + - type: 'textarea' + id: 'steps' + attributes: + label: 'Steps to reproduce' + description: 'List the steps needed to reproduce the issue.' + validations: + required: true + + - type: 'textarea' + id: 'expected' + attributes: + label: 'Expected behavior' + description: 'What did you expect to happen?' + + - type: 'textarea' + id: 'environment' + attributes: + label: 'Environment' + description: 'Operating system, software version, and any other relevant details.' + validations: + required: true + + - type: 'textarea' + id: 'additional' + attributes: + label: 'Additional context' + description: 'Any other information, logs, or screenshots.' 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..a839cfaab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,24 @@ +name: 'Feature request' +description: 'Suggest a new feature or improvement for LFOps' +labels: + - 'enhancement' +body: + - type: 'textarea' + id: 'solution' + attributes: + label: 'Describe the solution you would like' + description: 'A clear and concise description of what you want to happen.' + validations: + required: true + + - type: 'textarea' + id: 'alternatives' + attributes: + label: 'Alternatives considered' + description: 'Have you considered any alternative solutions or workarounds?' + + - type: 'textarea' + id: 'context' + attributes: + label: 'Additional context' + description: 'Any other context or screenshots about the feature request.' diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..78a831972 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: 'github-actions' + directory: '/' + schedule: + interval: 'weekly' + day: 'friday' + time: '05:00' + timezone: 'Etc/UTC' diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..c6e539fbe --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,52 @@ +name: 'CodeQL' + +on: + push: + branches: + - 'main' + pull_request: + branches: + - 'main' + schedule: + - cron: '0 5 * * 5' + +permissions: + contents: 'read' + +jobs: + analyze: + name: 'Analyze' + runs-on: 'ubuntu-latest' + permissions: + actions: 'read' + contents: 'read' + security-events: 'write' + + strategy: + fail-fast: false + matrix: + language: + - 'actions' + - 'python' + + steps: + - name: 'Harden Runner' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Checkout repository' + 
uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 + + - name: 'Initialize CodeQL' + uses: 'github/codeql-action/init@c10b8064de6f491fea524254123dbe5e09572f13' # v4.35.1 + with: + languages: '${{ matrix.language }}' + + - name: 'Autobuild' + uses: 'github/codeql-action/autobuild@c10b8064de6f491fea524254123dbe5e09572f13' # v4.35.1 + + - name: 'Perform CodeQL Analysis' + uses: 'github/codeql-action/analyze@c10b8064de6f491fea524254123dbe5e09572f13' # v4.35.1 + with: + category: '/language:${{ matrix.language }}' diff --git a/.github/workflows/dependabot-auto-merge.yml b/.github/workflows/dependabot-auto-merge.yml new file mode 100644 index 000000000..d459cc5c3 --- /dev/null +++ b/.github/workflows/dependabot-auto-merge.yml @@ -0,0 +1,25 @@ +name: 'Linuxfabrik: Dependabot auto-merge' + +on: + pull_request: {} + +permissions: + contents: 'write' + pull-requests: 'write' + +jobs: + auto-merge: + runs-on: 'ubuntu-latest' + if: 'github.actor == ''dependabot[bot]''' + steps: + + - uses: 'dependabot/fetch-metadata@v3' + id: 'meta' + + - if: >- + steps.meta.outputs.update-type == 'version-update:semver-patch' + || steps.meta.outputs.update-type == 'version-update:semver-minor' + run: 'gh pr merge --auto --squash "$PR_URL"' + env: + PR_URL: '${{ github.event.pull_request.html_url }}' + GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}' diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 000000000..565015fcf --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,22 @@ +name: 'Dependency Review' + +on: + pull_request: {} + +permissions: + contents: 'read' + +jobs: + dependency-review: + runs-on: 'ubuntu-latest' + steps: + - name: 'Harden Runner' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Checkout repository' + uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 + + - 
name: 'Dependency Review' + uses: 'actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48' # v4.9.0 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 000000000..48ee44fe9 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,62 @@ +name: 'Linuxfabrik: Deploy Documentation' + +on: + push: + branches: + - 'main' + +permissions: + contents: 'read' + pages: 'write' + id-token: 'write' + +concurrency: + group: 'pages' + cancel-in-progress: true + +jobs: + build: + runs-on: 'ubuntu-latest' + steps: + - name: 'Harden the runner (Audit all outbound calls)' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Checkout repository' + uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 + + - name: 'Set up Python' + uses: 'actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405' # v6.2.0 + with: + python-version: '3.12' + + - name: 'Install dependencies' + run: 'pip install mkdocs==1.6.1 mkdocs-material==9.7.6' + + - name: 'Generate docs structure' + run: 'python3 tools/build-docs' + + - name: 'Build documentation' + run: 'mkdocs build --strict' + + - name: 'Upload Pages artifact' + uses: 'actions/upload-pages-artifact@fc324d3547104276b827a68afc52ff2a11cc49c9' # v5.0.0 + with: + path: 'site' + + deploy: + needs: 'build' + runs-on: 'ubuntu-latest' + environment: + name: 'github-pages' + url: '${{ steps.deployment.outputs.page_url }}' + steps: + - name: 'Harden the runner (Audit all outbound calls)' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Deploy to GitHub Pages' + id: 'deployment' + uses: 'actions/deploy-pages@cd2ce8fcbc39b97be8ca5fce6e763baed58fa128' # v5.0.0 diff --git a/.github/workflows/lf-build.yml b/.github/workflows/lf-build.yml index c7215093f..104cb9e82 100644 --- a/.github/workflows/lf-build.yml +++ 
b/.github/workflows/lf-build.yml @@ -15,22 +15,28 @@ on: - 'v*' # modify the default permissions granted to the GITHUB_TOKEN -permissions: - contents: 'read' # to checkout the code - packages: 'write' # to push to GitHub Container Registry +permissions: 'read-all' jobs: build: runs-on: 'ubuntu-latest' + permissions: + contents: 'read' # to checkout the code + packages: 'write' # to push to GitHub Container Registry steps: + - name: 'Harden the runner (Audit all outbound calls)' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + - name: 'git clone https://github.com/Linuxfabrik/lfops' - uses: 'actions/checkout@v4' + uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 - name: 'Log in to GitHub Container Registry' - uses: 'redhat-actions/podman-login@v1' + uses: 'redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603' # v1.7 with: registry: 'ghcr.io' username: '${{ github.actor }}' @@ -65,15 +71,18 @@ jobs: - name: 'Install Ansible Builder' run: | python3 -m pip install --upgrade pip - pip install ansible-builder + pip install ansible-builder==3.1.1 + + - name: 'Strip badges from README.md (not rendered correctly on Galaxy)' + run: | + sed --in-place '/<div/,/<\/div>/d' README.md
- name: 'Build Collection' run: | ansible-galaxy collection build cp --verbose linuxfabrik-lfops-${{ env.TAG1 }}.tar.gz linuxfabrik-lfops.tar.gz - - name: 'Publish to Galaxy (Prod)' - if: "${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }}" + - name: 'Publish to Galaxy' env: ANSIBLE_GALAXY_TOKEN: '${{ secrets.GALAXY_API_KEY_PROD }}' run: | @@ -82,16 +91,6 @@ jobs: --server https://galaxy.ansible.com \ --api-key "$ANSIBLE_GALAXY_TOKEN" - - name: 'Publish to Galaxy (Dev)' - if: "${{ github.event_name == 'workflow_dispatch' }}" - env: - ANSIBLE_GALAXY_TOKEN: '${{ secrets.GALAXY_API_KEY_DEV }}' - run: | - ansible-galaxy collection publish \ - linuxfabrik-lfops-${{ env.TAG1 }}.tar.gz \ - --server https://galaxy-dev.ansible.com \ - --api-key "$ANSIBLE_GALAXY_TOKEN" - - name: 'Build Execution Environment' run: | ansible-builder build \ @@ -106,7 +105,7 @@ jobs: - name: 'Push to GitHub Container Registry' id: 'push-to-ghcr' - uses: 'redhat-actions/push-to-registry@v2' + uses: 'redhat-actions/push-to-registry@5ed88d269cf581ea9ef6dd6806d01562096bee9c' # v2.8 with: registry: 'ghcr.io' image: '${{ env.GITHUB_REPOSITORY_OWNER_LOWERCASE }}/lfops_ee' diff --git a/.github/workflows/lf-release.yml b/.github/workflows/lf-release.yml index a807f33fc..3d3ac2eba 100644 --- a/.github/workflows/lf-release.yml +++ b/.github/workflows/lf-release.yml @@ -6,17 +6,23 @@ on: - 'v*' # modify the default permissions granted to the GITHUB_TOKEN -permissions: - contents: 'write' # to push to the repo and create the release +permissions: 'read-all' jobs: release: runs-on: 'ubuntu-latest' + permissions: + contents: 'write' # to push to the repo and create the release steps: + - name: 'Harden the runner (Audit all outbound calls)' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + - name: 'Create GitHub Release for ${{ github.ref_name }}' - uses: 'softprops/action-gh-release@v2' +
uses: 'softprops/action-gh-release@b4309332981a82ec1c5618f44dd2e27cc8bfbfda' # v3.0.0 with: tag_name: '${{ github.ref_name }}' body: | diff --git a/.github/workflows/pre-commit-autoupdate.yml b/.github/workflows/pre-commit-autoupdate.yml new file mode 100644 index 000000000..088e35945 --- /dev/null +++ b/.github/workflows/pre-commit-autoupdate.yml @@ -0,0 +1,42 @@ +name: 'Linuxfabrik: Update pre-commit hooks' + +on: + schedule: + - cron: '0 5 * * 5' + workflow_dispatch: {} + +permissions: 'read-all' + +jobs: + update: + runs-on: 'ubuntu-latest' + permissions: + contents: 'write' + pull-requests: 'write' + steps: + - name: 'Harden the runner (Audit all outbound calls)' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Checkout repository' + uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 + + - name: 'Set up Python' + uses: 'actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405' # v6.2.0 + with: + python-version: '3.12' + + - name: 'Install pre-commit' + run: 'pip install pre-commit==4.5.1' + + - name: 'Run pre-commit autoupdate' + run: 'pre-commit autoupdate' + + - name: 'Create Pull Request' + uses: 'peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1' # v8.1.1 + with: + commit-message: 'chore: update pre-commit hooks' + title: 'chore: update pre-commit hooks' + body: 'Automatic pre-commit hook version update.' 
+ branch: 'chore/pre-commit-autoupdate' diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 000000000..631647892 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,47 @@ +name: 'Scorecard supply-chain security' + +on: + branch_protection_rule: {} + schedule: + - cron: '0 5 * * 5' + workflow_dispatch: {} + +permissions: 'read-all' + +jobs: + analysis: + name: 'Scorecard analysis' + runs-on: 'ubuntu-latest' + permissions: + security-events: 'write' + id-token: 'write' + + steps: + - name: 'Harden Runner' + uses: 'step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176' # v2.17.0 + with: + egress-policy: 'audit' + + - name: 'Checkout code' + uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6.0.2 + with: + persist-credentials: false + + - name: 'Run analysis' + uses: 'ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a' # v2.4.3 + with: + results_file: 'results.sarif' + results_format: 'sarif' + publish_results: true + + - name: 'Upload artifact' + uses: 'actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a' # v7.0.1 + with: + name: 'SARIF file' + path: 'results.sarif' + retention-days: 5 + + - name: 'Upload to code-scanning' + uses: 'github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13' # v4.35.1 + with: + sarif_file: 'results.sarif' diff --git a/.gitignore b/.gitignore index 3bb399875..60447e160 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,16 @@ playbooks/test.yml roles/test context/ particle/.vagrant + +# mkdocs documentation +/docs/CHANGELOG.md +/docs/CODE_OF_CONDUCT.md +/docs/compatibility.md +/docs/contributing.md +/docs/playbooks.md +/docs/roles/ +/docs/security.md +/docs/stigs.md +/mkdocs.yml +/site +stig/stig.db diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c4aa3b82e..438720bf2 100644 --- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml @@ -1,49 +1,44 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks - repos: + - repo: 'https://github.com/adrienverge/yamllint' + rev: 'v1.38.0' + hooks: + - id: 'yamllint' + args: ['-c=.yamllint.yml'] + files: '\.(yaml|yml)$' + types: ['file', 'yaml'] + entry: 'yamllint' + - repo: 'https://github.com/pre-commit/pre-commit-hooks' - rev: 'v4.6.0' + rev: 'v6.0.0' hooks: - - id: 'trailing-whitespace' - - id: 'end-of-file-fixer' - id: 'check-added-large-files' + - id: 'check-ast' + - id: 'fix-byte-order-marker' - id: 'check-case-conflict' - id: 'check-executables-have-shebangs' - id: 'check-json' + - id: 'check-merge-conflict' - id: 'check-yaml' args: ['--unsafe'] - - id: 'detect-private-key' - - id: 'check-ast' - - id: 'check-byte-order-marker' - - id: 'check-merge-conflict' - id: 'debug-statements' + - id: 'detect-private-key' + - id: 'end-of-file-fixer' - id: 'mixed-line-ending' + - id: 'trailing-whitespace' - # - repo: 'https://github.com/pycqa/isort' - # rev: '5.10.1' - # hooks: - # - id: 'isort' - - - repo: 'https://github.com/adrienverge/yamllint.git' - rev: 'v1.35.1' + - repo: 'https://github.com/PyCQA/bandit' + rev: '1.9.4' hooks: - - id: 'yamllint' - args: ['-c=.yamllint.yml'] - files: '\.(yaml|yml)$' - types: ['file', 'yaml'] - entry: 'yamllint' + - id: 'bandit' + args: + - '--severity-level=low' + - '--confidence-level=low' + - '--skip=B110,B112,B311' # graceful-degradation patterns, non-crypto randomness + types_or: ['python'] -# - repo: 'local' -# hooks: -# - id: 'pylint' -# name: 'pylint' -# entry: 'pylint' -# language: 'system' -# types: [python] -# args: -# [ -# "-rn", # Only display messages -# "-sn", # Don't display the score -# "--disable=C0103,C0114,C0116,C0301", -# ] + - repo: 'https://github.com/jendrikseipp/vulture' + rev: 'v2.16' + hooks: + - id: 'vulture' + args: ['--min-confidence=80'] + types_or: ['python'] diff --git a/CHANGELOG.md b/CHANGELOG.md index 
58718e6e6..0a8bc6835 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,41 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -> **Maintainer note:** Always add new entries to the top of the Unreleased section (newest first), even if this results in multiple entries for the same role. This way users only need to read the new entries at the top. +> **Maintainer note:** Always add new entries to the top of the Unreleased section (newest first; correct subsection), even if this results in multiple entries for the same role. This way users only need to read the new entries at the top. ## [Unreleased] +### Added + +* **ci**: Add bandit (security) and vulture (dead code) to pre-commit hooks + +### Fixed + +* **execution-environment**: Add missing `sshpass` system package, required for SSH password-based connections (e.g. 
`--ask-pass`) +* **role:keycloak**: Fix transaction timeout silently dropping from 3600s to 300s on Keycloak 26.6.0+ due to new `transaction-default-timeout` CLI option overriding the Quarkus property +* **role:keycloak**: Fix MariaDB database encoding defaulting to deprecated `utf8` (`utf8mb3`) instead of `utf8mb4`, causing warnings in Keycloak 26.6.0+ +* **ci**: Fix pip installs by replacing `--require-hashes` with pinned versions to allow Dependabot updates +* **role:mount**: Fix `when` condition for NFS/CIFS client package installation failing with multiple mounts and when `state` key is undefined + +### Changed + +* **all roles**: Rewrite all role READMEs to use the new standard format: replace markdown tables with bullet lists for tags and variables, convert HTML/blockquote subkeys to expanded indented format, standardize terminology (`Bool` not `Boolean`, `Mandatory` not `Required`) +* **role:opensearch**: Rewrite README with step-by-step cluster setup guide, single-node section, post-installation steps, and improved variable documentation +* **role:elasticsearch**: Improve README with single-node section and clearer explanation of the manual certificate approach for cluster setup +* **COMPATIBILITY**: Add missing `crypto_policy` RHEL 10 entry +* **COMPATIBILITY**: Remove Debian 11 and Ubuntu 20.04 columns (EOL) + + +## [v6.0.1] - 2026-04-07 + +### Fixed + +* **ci**: Strip badges from README.md before publishing to Galaxy, as external images are not rendered + + +## [v6.0.0] - 2026-04-07 + ### Breaking Changes * **role:nfs_server**: Rework `nfs_server__exports` from a list of strings to a list of dictionaries with new `path`, `clients`, `owner`, `group`, and `mode` subkeys @@ -18,6 +48,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +* Add MkDocs-based documentation site, deployed automatically to GitHub Pages via `tools/build-docs` and a GitHub Actions workflow +* **CONTRIBUTING**: Document semantic parameter 
ordering for Ansible modules +* **playbooks**: Add `example.yml` and `setup_example.yml` playbooks as development references +* **role:example**: Add complete example role with defaults, handlers, tasks, templates, and vars as a reference for consistent role development +* **role:icingaweb2_module_grafana**: Add JWT support +* **role:grafana**: Add JWT support +* Add `playbooks/README.md` documenting all playbooks with their roles in execution order and available skip variables +* **role:apache_httpd**: Add platform-specific behavior section, wsgi example, and document localhost endpoints in README +* **role:apache_httpd**: Add skip variables section to README linking to relevant playbooks +* **role:mailx**: Add skip variables section to README linking to relevant playbooks +* **role:policycoreutils**: Add skip variables section to README linking to relevant playbooks +* **role:yum_utils**: Add skip variables section to README linking to relevant playbooks +* **plugin:bitwarden_item**: Add file-based item cache to reduce `bw serve` API calls, preventing crashes under load. Cache is stored in `$XDG_RUNTIME_DIR` (RAM-backed tmpfs) with `/tmp` fallback. After create/edit operations, the cache is updated inline to avoid expensive full re-syncs, with a 1-second sleep as rate limit to prevent Bitwarden API errors. Convert `is_unlocked` to a property to fix it never being called. 
+* **role:freeipa_server**: Add `--diff` support for all FreeIPA modules and add `freeipa_server:configure` tag * **role:mariadb_server**: Add `mariadb_server__cnf_wsrep_log_conflicts` and `mariadb_server__cnf_wsrep_retry_autocommit` variables * **role:mariadb_server**: Add `mariadb_server__cnf_wsrep_gtid_mode` variable to configure `wsrep_gtid_mode` for Galera * **role:openvpn_server**: Add `openvpn_server:crl` tag to allow deploying the certificate revocation list independently @@ -53,6 +97,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +* **ci**: Publish pre-releases directly to prod Ansible Galaxy instead of galaxy-dev, since it is unreliable and pulp-ansible excludes pre-release versions from "latest" +* Update pre-commit hooks to latest versions +* Unify CONTRIBUTING and convert from reStructuredText to Markdown +* **roles**: Add `backup: true` to all `ansible.builtin.template` tasks to ensure config file backups before overwriting * **role:nextcloud**: Refactor `nextcloud-update.j2` * **role:keycloak**: Rework `keycloak.conf` template to match Keycloak's default config structure * **role:apache_httpd**: bump Core Rule Set to 4.24.1 @@ -62,6 +110,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed +* **role:apache_httpd**: Fix `apache_httpd__mod_security_coreruleset_version` default value in README (4.4.0 -> 4.24.1), fix prefork variable names in README (`spare_threads` -> `spare_servers`), fix various typos ("best practise", "Tipp") +* **role:mailx**: Fix grammar in task name ("make" -> "makes"), sort template module parameters alphabetically +* **role:policycoreutils**: Fix grammar in task name ("are" -> "is") +* **plugin:bitwarden_item**: Fix missing `raise` in multipart error handling, `break` instead of `continue` in multi-term lookup, `folder_id` wrongly typed as `list` instead of `str` in module, notes default mismatch between documentation and code, and 
wrong "lookup plugin" wording in module documentation +* **role:mirror**: Fix missing `0440` permissions on sudoers file +* **role:login**: Rename sudoers file from `lfops_login` to `linuxfabrik` to match the kickstart configuration; remove the old file automatically * **roles**: Fix Ansible 2.19 deprecation warning for conditional results of type `int` by using `| length > 0` instead of `| length` * **role:firewall**: Fix fwbuilder repo clone being skipped when `run_once` picks a host without `firewall__fwbuilder_repo_url` * **role:sshd**: Validate sshd config with `sshd -t` before reloading the service @@ -669,7 +723,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **module_util:gnupg**: Add new module util -[Unreleased]: https://github.com/Linuxfabrik/lfops/compare/v5.1.0...HEAD +[Unreleased]: https://github.com/Linuxfabrik/lfops/compare/v6.0.1...HEAD +[v6.0.1]: https://github.com/Linuxfabrik/lfops/compare/v6.0.0...v6.0.1 +[v6.0.0]: https://github.com/Linuxfabrik/lfops/compare/v5.1.0...v6.0.0 [v5.1.0]: https://github.com/Linuxfabrik/lfops/compare/v5.0.0...v5.1.0 [v5.0.0]: https://github.com/Linuxfabrik/lfops/compare/v4.0.0...v5.0.0 [v4.0.0]: https://github.com/Linuxfabrik/lfops/compare/v3.0.0...v4.0.0 diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md index 791e8d8a8..c958f0427 100644 --- a/COMPATIBILITY.md +++ b/COMPATIBILITY.md @@ -3,182 +3,182 @@ Which Ansible role is proven to run on which OS? 
``` --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- - | Debian | RHEL | Ubuntu | other -Role | 11 | 12 | 13 | 8 | 9 | 10 | 20.04 | 22.04 | 24.04 | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- -acme_sh | | x | x | x | x | x | | | x | -alternatives | x | x | x | x | x | | x | x | x | -ansible_init | | | | | | | | | | Fedora 35+ -apache_httpd | | x | x | x | x | x | | | x | -apache_solr | | | | x | x | | | | | -apache_tomcat | | | | x | x | | | | | -apps | | | | x | x | x | | | | -at | | | | x | x | | | | | Fedora 35 -audit | | | | x | x | | | | | -bind | | | | x | x | x | | | | -blocky | | | | x | x | | | | | -borg_local | | | | x | | | | | | -chrony | | | | x | x | x | | | | -clamav | | | | x | x | | | | | -cloud_init | | | | x | x | | | | | -cockpit | | | | x | x | | | | | Fedora 35 -collabora | | | | x | x | | | | | -collect_rpmnew_rpmsave | | | | x | x | | | | | Fedora 40 -coturn | | | | x | x | | | | | -crypto_policy | | | | x | x | | | | | -dnf_makecache | | | | x | x | | | | | -dnf_versionlock | | | | x | x | | | | | Fedora 40 -docker | | | | x | | | | | | -duplicity | | | | x | x | x | | | | Fedora 35 -elastic_agent | | | | | x | | | | x | -elastic_agent_fleet_server | | | | | x | | | | x | -elasticsearch | | | | x | x | | | | x | -exoscale_vm | | | | | | | | | | Fedora 35+ -fail2ban | | | | x | x | | | | | -fangfrisch | | | | | x | | | | | -files | | | | x | x | x | | | | -firewall | | | | x | x | | | | | -freeipa_client | | | | x | x | x | | | | -freeipa_server | | | | x | x | x | | | | -github_project_createrepo | | | | x | | | | | | -gitlab_ce | | | | x | | | | | | -glances | | | | x | x | | | | | -glpi_agent | | | | x | x | | | | | -grafana | | | | x | x | x | | | | -grafana_grizzly | | | | x | x | | | | | -grav | | | | x | | | | | | -graylog_datanode | | x | x | x | x | | | | | -graylog_server | x | x | x | x | | | | | | -haveged | | | | x | 
x | | | | | -hetzner_vm | | | | | | | | | | Fedora 35+ -hostname | | | | x | x | | | | | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- - | Debian | RHEL | Ubuntu | other -Role | 11 | 12 | 13 | 8 | 9 | 10 | 20.04 | 22.04 | 24.04 | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- -icinga2_agent | | | x | x | x | | | | | Fedora 35 -icinga2_master | | x | x | x | x | | | | | -icingadb | | x | x | x | x | | | | | -icingadb_web | | x | x | x | x | | | | | -icinga_kubernetes | | | | | x | | | | | -icinga_kubernetes_web | | | | | x | | | | | -icingaweb2 | | x | x | x | x | | | | | -icingaweb2_module_businessprocess | | x | x | x | x | | | | | -icingaweb2_module_company | | x | x | x | x | | | | | -icingaweb2_module_cube | | x | x | x | x | | | | | -icingaweb2_module_director | | x | x | x | x | | | | | -icingaweb2_module_doc | | x | x | x | x | | | | | -icingaweb2_module_fileshipper | | x | x | x | x | | | | | -icingaweb2_module_generictts | | x | x | x | x | | | | | -icingaweb2_module_grafana | | x | x | x | x | | | | | -icingaweb2_module_incubator | | x | x | x | x | | | | | -icingaweb2_module_jira | | | | x | x | | | | | -icingaweb2_module_monitoring | | | | x | | | | | | -icingaweb2_module_pdfexport | | x | x | x | x | | | | | -icingaweb2_module_reporting | | | | x | | | | | | -icingaweb2_module_vspheredb | | x | x | x | x | | | | | -icingaweb2_module_x509 | | x | x | x | x | | | | | -icingaweb2_theme_linuxfabrik | | | | x | x | | | | x | -influxdb | | x | x | x | x | | | | | -infomaniak_vm | | | | | | | | | | Fedora 35+ -kdump | | | | x | x | | | | | -keepalived | | | | x | | | | | | -kernel_settings | - | x | x | x | x | | - | | | -keycloak | | | | x | | | | | | -kibana | | | | | x | | | | x | -kvm_host | | | | x | | | | | x | -kvm_vm | | | | x | | | | | x | -libmaxminddb | | | | x | | | | | | -librenms | | | | x | | | | | | -libreoffice | | | | x | | | | | 
| -login | | | | x | x | | | | | Fedora 35+ -logrotate | | | | x | x | x | | | | Fedora -logstash | | | | | x | | | | x | -lvm | | | | | | | | | | -mailto_root | | | | x | x | | | | | -mailx | x | x | x | x | x | x | | | | Fedora -mariadb_server | | x | x | x | x | | | | | Galera on Debian is untested -mastodon | | | | | x | | | | | -maxmind_geoip | | | | x | | | | | | -minio_client | | | | x | | | | | | -mirror | | | | x | x | | | | | -mod_maxminddb | | | | x | | | | | | -mongodb | x | x | x | x | x | | | | | -monitoring_plugins | | | | x | x | | | | | Debian 9, Fedora, Suse, Windows -monitoring_plugins_grafana_dashboards| | | | x | x | | | | | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- - | Debian | RHEL | Ubuntu | other -Role | 11 | 12 | 13 | 8 | 9 | 10 | 20.04 | 22.04 | 24.04 | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- -moodle | - | - | - | x | x | | | | | -motd | | | | x | x | | | | | -mount | x | | | x | x | | | | | -network | - | - | - | x | x | | | | | -nextcloud | | | | x | x | | | | | -nfs_client | x | | | x | x | | | | | -nfs_server | x | | | x | | | | | | -nodejs | | | | x | x | | | | | -objectstore_backup | | | | x | | | | | | -opensearch | x | | | x | | | | | | -open_vm_tools | | | | x | x | | | | | -openvpn_server | | | | x | x | | | | | -php | | x | x | x | x | | | | | -podman_containers | | | | | x | | | | | -policycoreutils | | | | x | x | x | | | | Fedora 35 -postfix | x | x | x | x | x | x | | | | Fedora 35 -postgresql_server | | | | x | | | | | | -proxysql | | | | x | | | | | | -python | | x | x | x | x | | | | | Windows -python_venv | x | x | x | x | x | | | | | Fedora 35 -qemu_guest_agent | | | | x | x | x | | | | -redis | | x | x | x | x | | | | | -repo_baseos | | | | x | x | x | | | | -repo_collabora | | | | x | | | | | | -repo_collabora_code | | | | x | x | | | | | -repo_debian_base | x | | | - | - | | | | | -repo_docker 
| | | | x | | | | | | -repo_elasticsearch | | | | x | x | | | | x | -repo_epel | | | | x | x | x | | | | -repo_gitlab_ce | | | | x | | | | | | -repo_gitlab_runner | | | | x | | | | | | -repo_grafana | | x | x | x | x | | | | | -repo_graylog | x | x | x | x | | | | | | -repo_icinga | | x | x | x | x | | | x | | -repo_influxdb | x | x | x | x | x | | | | | -repo_mariadb | | x | x | x | x | | | | x | -repo_mongodb | x | x | x | x | x | | | | | -repo_monitoring_plugins | x | | | x | x | | x | x | | -repo_mydumper | | | | x | x | | | | | -repo_opensearch | x | x | x | x | | | x | x | x | -repo_postgresql | | | | | x | | | | | -repo_proxysql | | | | x | x | | | | | -repo_redis | | x | x | | | | | | | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- - | Debian | RHEL | Ubuntu | other -Role | 11 | 12 | 13 | 8 | 9 | 10 | 20.04 | 22.04 | 24.04 | --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- -repo_remi | | | | x | x | | | | | Fedora 35 -repo_rpmfusion | | | | x | | | | | | -repo_sury | x | x | x | - | - | | | | | -rocketchat | | | | x | | | | | | Fedora 35 -rsyslog | | | | x | x | | | | | -selinux | | | | x | x | | | | | -shell | | | | x | x | x | | | | -snmp | | | | x | x | | | | | -squid | | | | | x | | | | | -sshd | | x | x | x | x | x | | | | Fedora 40 -system_update | x | x | x | x | x | | | | | -systemd_journald | | | | x | x | | | | | -systemd_unit | | | | x | x | x | | | | -telegraf | | | | x | | | | | | -timezone | | | | x | x | | | | | Fedora 35 -tools | | | | x | x | | | | | Fedora -unattended_upgrades | x | | | | | | | | | -vsftpd | | | | x | | | | | | -wordpress | | | | x | | | | | | -yum_utils | | | | x | x | x | | | | Fedora 35 --------------------------------------+----+----+----+---+---+----+-------+-------+-------+----------- +-------------------------------------+----+----+---+---+----+-------+-------+----------- + | Debian | RHEL | Ubuntu | other 
+Role | 12 | 13 | 8 | 9 | 10 | 22.04 | 24.04 | +-------------------------------------+----+----+---+---+----+-------+-------+----------- +acme_sh | x | x | x | x | x | | x | +alternatives | x | x | x | x | | x | x | +ansible_init | | | | | | | | Fedora 35+ +apache_httpd | x | x | x | x | x | | x | +apache_solr | | | x | x | | | | +apache_tomcat | | | x | x | | | | +apps | | | x | x | x | | | +at | | | x | x | | | | Fedora 35 +audit | | | x | x | | | | +bind | | | x | x | x | | | +blocky | | | x | x | | | | +borg_local | | | x | | | | | +chrony | | | x | x | x | | | +clamav | | | x | x | | | | +cloud_init | | | x | x | | | | +cockpit | | | x | x | | | | Fedora 35 +collabora | | | x | x | | | | +collect_rpmnew_rpmsave | | | x | x | | | | Fedora 40 +coturn | | | x | x | | | | +crypto_policy | | | x | x | x | | | +dnf_makecache | | | x | x | | | | +dnf_versionlock | | | x | x | | | | Fedora 40 +docker | | | x | | | | | +duplicity | | | x | x | x | | | Fedora 35 +elastic_agent | | | | x | | | x | +elastic_agent_fleet_server | | | | x | | | x | +elasticsearch | | | x | x | | | x | +exoscale_vm | | | | | | | | Fedora 35+ +fail2ban | | | x | x | | | | +fangfrisch | | | | x | | | | +files | | | x | x | x | | | +firewall | | | x | x | | | | +freeipa_client | | | x | x | x | | | +freeipa_server | | | x | x | x | | | +github_project_createrepo | | | x | | | | | +gitlab_ce | | | x | | | | | +glances | | | x | x | | | | +glpi_agent | | | x | x | | | | +grafana | | | x | x | x | | | +grafana_grizzly | | | x | x | | | | +grav | | | x | | | | | +graylog_datanode | x | x | x | x | | | | +graylog_server | x | x | x | | | | | +haveged | | | x | x | | | | +hetzner_vm | | | | | | | | Fedora 35+ +hostname | | | x | x | | | | +-------------------------------------+----+----+---+---+----+-------+-------+----------- + | Debian | RHEL | Ubuntu | other +Role | 12 | 13 | 8 | 9 | 10 | 22.04 | 24.04 | +-------------------------------------+----+----+---+---+----+-------+-------+----------- 
+icinga2_agent | | x | x | x | | | | Fedora 35 +icinga2_master | x | x | x | x | | | | +icingadb | x | x | x | x | | | | +icingadb_web | x | x | x | x | | | | +icinga_kubernetes | | | | x | | | | +icinga_kubernetes_web | | | | x | | | | +icingaweb2 | x | x | x | x | | | | +icingaweb2_module_businessprocess | x | x | x | x | | | | +icingaweb2_module_company | x | x | x | x | | | | +icingaweb2_module_cube | x | x | x | x | | | | +icingaweb2_module_director | x | x | x | x | | | | +icingaweb2_module_doc | x | x | x | x | | | | +icingaweb2_module_fileshipper | x | x | x | x | | | | +icingaweb2_module_generictts | x | x | x | x | | | | +icingaweb2_module_grafana | x | x | x | x | | | | +icingaweb2_module_incubator | x | x | x | x | | | | +icingaweb2_module_jira | | | x | x | | | | +icingaweb2_module_monitoring | | | x | | | | | +icingaweb2_module_pdfexport | x | x | x | x | | | | +icingaweb2_module_reporting | | | x | | | | | +icingaweb2_module_vspheredb | x | x | x | x | | | | +icingaweb2_module_x509 | x | x | x | x | | | | +icingaweb2_theme_linuxfabrik | | | x | x | | | x | +influxdb | x | x | x | x | | | | +infomaniak_vm | | | | | | | | Fedora 35+ +kdump | | | x | x | | | | +keepalived | | | x | | | | | +kernel_settings | x | x | x | x | | | | +keycloak | | | x | | | | | +kibana | | | | x | | | x | +kvm_host | | | x | | | | x | +kvm_vm | | | x | | | | x | +libmaxminddb | | | x | | | | | +librenms | | | x | | | | | +libreoffice | | | x | | | | | +login | | | x | x | | | | Fedora 35+ +logrotate | | | x | x | x | | | Fedora +logstash | | | | x | | | x | +lvm | | | | | | | | +mailto_root | | | x | x | | | | +mailx | x | x | x | x | x | | | Fedora +mariadb_server | x | x | x | x | | | | Galera on Debian is untested +mastodon | | | | x | | | | +maxmind_geoip | | | x | | | | | +minio_client | | | x | | | | | +mirror | | | x | x | | | | +mod_maxminddb | | | x | | | | | +mongodb | x | x | x | x | | | | +monitoring_plugins | | | x | x | | | | Debian 9, Fedora, Suse, Windows 
+monitoring_plugins_grafana_dashboards| | | x | x | | | | +-------------------------------------+----+----+---+---+----+-------+-------+----------- + | Debian | RHEL | Ubuntu | other +Role | 12 | 13 | 8 | 9 | 10 | 22.04 | 24.04 | +-------------------------------------+----+----+---+---+----+-------+-------+----------- +moodle | - | - | x | x | | | | +motd | | | x | x | | | | +mount | | | x | x | | | | +network | - | - | x | x | | | | +nextcloud | | | x | x | | | | +nfs_client | | | x | x | | | | +nfs_server | | | x | | | | | +nodejs | | | x | x | | | | +objectstore_backup | | | x | | | | | +opensearch | | | x | | | | | +open_vm_tools | | | x | x | | | | +openvpn_server | | | x | x | | | | +php | x | x | x | x | | | | +podman_containers | | | | x | | | | +policycoreutils | | | x | x | x | | | Fedora 35 +postfix | x | x | x | x | x | | | Fedora 35 +postgresql_server | | | x | | | | | +proxysql | | | x | | | | | +python | x | x | x | x | | | | Windows +python_venv | x | x | x | x | | | | Fedora 35 +qemu_guest_agent | | | x | x | x | | | +redis | x | x | x | x | | | | +repo_baseos | | | x | x | x | | | +repo_collabora | | | x | | | | | +repo_collabora_code | | | x | x | | | | +repo_debian_base | | | - | - | | | | +repo_docker | | | x | | | | | +repo_elasticsearch | | | x | x | | | x | +repo_epel | | | x | x | x | | | +repo_gitlab_ce | | | x | | | | | +repo_gitlab_runner | | | x | | | | | +repo_grafana | x | x | x | x | | | | +repo_graylog | x | x | x | | | | | +repo_icinga | x | x | x | x | | x | | +repo_influxdb | x | x | x | x | | | | +repo_mariadb | x | x | x | x | | | x | +repo_mongodb | x | x | x | x | | | | +repo_monitoring_plugins | | | x | x | | x | | +repo_mydumper | | | x | x | | | | +repo_opensearch | x | x | x | | | x | x | +repo_postgresql | | | | x | | | | +repo_proxysql | | | x | x | | | | +repo_redis | x | x | | | | | | +-------------------------------------+----+----+---+---+----+-------+-------+----------- + | Debian | RHEL | Ubuntu | other +Role | 12 
| 13 | 8 | 9 | 10 | 22.04 | 24.04 | +-------------------------------------+----+----+---+---+----+-------+-------+----------- +repo_remi | | | x | x | | | | Fedora 35 +repo_rpmfusion | | | x | | | | | +repo_sury | x | x | - | - | | | | +rocketchat | | | x | | | | | Fedora 35 +rsyslog | | | x | x | | | | +selinux | | | x | x | | | | +shell | | | x | x | x | | | +snmp | | | x | x | | | | +squid | | | | x | | | | +sshd | x | x | x | x | x | | | Fedora 40 +system_update | x | x | x | x | | | | +systemd_journald | | | x | x | | | | +systemd_unit | | | x | x | x | | | +telegraf | | | x | | | | | +timezone | | | x | x | | | | Fedora 35 +tools | | | x | x | | | | Fedora +unattended_upgrades | | | | | | | | +vsftpd | | | x | | | | | +wordpress | | | x | | | | | +yum_utils | | | x | x | x | | | Fedora 35 +-------------------------------------+----+----+---+---+----+-------+-------+----------- ``` Legend: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..fede66e56 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,633 @@ +# Contributing + + +## Linuxfabrik Standards + +The following standards apply to all Linuxfabrik repositories. + + +### Code of Conduct + +Please read and follow our [Code of Conduct](CODE_OF_CONDUCT.md). + + +### Issue Tracking + +Open issues are tracked on GitHub Issues in the respective repository. + + +### Pre-commit + +Some repositories use [pre-commit](https://pre-commit.com/) for automated linting and formatting checks. 
If the repository contains a `.pre-commit-config.yaml`, install [pre-commit](https://pre-commit.com/#install) and configure the hooks after cloning: + +```bash +pre-commit install +``` + + +### Commit Messages + +Commit messages follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification: + +``` +<type>(<scope>): <subject> +``` + +If there is a related issue, append `(fix #N)`: + +``` +<type>(<scope>): <subject> (fix #N) +``` + +`<type>` must be one of: + +- `chore`: Changes to the build process or auxiliary tools and libraries +- `docs`: Documentation only changes +- `feat`: A new feature +- `fix`: A bug fix +- `perf`: A code change that improves performance +- `refactor`: A code change that neither fixes a bug nor adds a feature +- `style`: Changes that do not affect the meaning of the code (whitespace, formatting, etc.) +- `test`: Adding missing tests + + +### Changelog + +Document all changes in `CHANGELOG.md` following [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). Sort entries within sections alphabetically. + + +### Language + +Code, comments, commit messages, and documentation must be written in English. + + +### Coding Conventions + +- Sort variables, parameters, lists, and similar items alphabetically where possible. +- Always use long parameters when using shell commands.
+- Use RFC [5737](https://datatracker.ietf.org/doc/html/rfc5737), [3849](https://datatracker.ietf.org/doc/html/rfc3849), [7042](https://datatracker.ietf.org/doc/html/rfc7042#section-2.1.1), and [2606](https://datatracker.ietf.org/doc/html/rfc2606) in examples and documentation: + - IPv4: `192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24` + - IPv6: `2001:DB8::/32` + - MAC: `00-00-5E-00-53-00` through `00-00-5E-00-53-FF` (unicast), `01-00-5E-90-10-00` through `01-00-5E-90-10-FF` (multicast) + - Domains: `*.example`, `example.com` + + +--- + + +## Ansible Development Guidelines + +To see these concepts in practice, have a look at the [example role](https://github.com/Linuxfabrik/lfops/tree/main/roles/example). + + +### Style Guide + +YAML: + +* Do not use `---` at the top of YAML files. It is only required if one specifies YAML directives above it. +* For YAML files, use the `.yml` extension. This is consistent with `ansible-galaxy init`. +* In YAML files, use 2 spaces for indentation. Elsewhere prefer 4 spaces. +* Use `true` / `false` instead of `yes` / `no`, as they are actually part of YAML. +* Always quote strings and prefer single quotes over double quotes. The only time you should use double quotes is when they are nested within single quotes (e.g. Jinja map reference), or when the string requires escaping characters (e.g. using `\n` to represent a newline). Even though strings are the default type for YAML, syntax highlighting looks better when types are set explicitly. It also helps troubleshooting malformed strings. +* If you must write a long string, use the "folded scalar" (`>` converts newlines to spaces, `|` keeps newlines) style and omit all special quoting. +* Do not quote booleans (e.g. `true`/`false`). +* Do not quote numbers (e.g. `42`). +* Do not quote octal numbers (e.g. `0o755`). +* Insert whitespaces around Jinja filters like so: `{{ my_var | d("my_default") }}`. 
+* Indent list items: + + Do: + + ```yaml + list1: + - item1 + - item2 + ``` + + Don't: + + ```yaml + list2: + - item1 + - item2 + list3: [ 'tag1', 'tag2' ] + ``` + +Ansible: + +* Keep 2 empty lines before each `- block:`. +* Prefer `item["subkey"]` to `item.subkey`, since that notation always works. +* Do not use special characters other than underscores in variable names. +* Try to name tasks after their respective shell commands. This makes it easy for sysadmins to understand what is going on. +* Do not use colons at the end of task names. `- name: 'Combined Users:'` renders as `Combined Users:]` in the output. +* Split long Jinja2 expressions into multiple lines. +* Always use `| length > 0` instead of bare `| length` in conditionals. Ansible 2.19+ requires conditional results to be bool, not int. +* Use the `| bool` filter when using bare variables (expressions consisting of just one variable reference without any operator). This guards against YAML quoting mistakes where a boolean ends up as a string: in a `when:` clause, the string `'false'` is truthy (non-empty string) and would incorrectly evaluate to true without `| bool`. Applying it consistently — including in module parameters where Ansible's type coercion would handle it — avoids having to think about where it matters. +* Order module parameters semantically, not alphabetically. The general order is: first identify the target, then describe the action, then set ownership and permissions. For example: + + ```yaml + - name: 'mkdir -p /etc/example' + ansible.builtin.file: + path: '/etc/example' + state: 'directory' + owner: 'root' + group: 'root' + mode: 0o755 + + - name: 'Deploy /etc/example/example.conf' + ansible.builtin.template: + backup: true + src: 'etc/example/example.conf.j2' + dest: '/etc/example/example.conf' + owner: 'root' + group: 'root' + mode: 0o644 + ``` + + This is an exception to the general "sort alphabetically" rule, as alphabetical ordering would obscure what the task operates on. 
+ +Commit scopes: + +* Use the role or playbook path as commit scope: + + ``` + fix(roles/graylog_server): prevent warn on receiveBufferSize (fix #341) + ``` + +* For the first commit, use the message `Add roles/` or `Add playbooks/`. + + +### Deliverables + +When creating a new role, make sure to deliver: + +* The role itself. +* `roles//README.md`, following `roles/example/README.md` as a template. +* `roles//meta/argument_specs.yml` declaring all user-facing variables. +* Update `playbooks/README.md`. +* Update `playbooks/all.yml`. +* Update `COMPATIBILITY.md`. +* Update `CHANGELOG.md`. + + +### Playbooks + +* Each playbook must contain all dependencies to run flawlessly against a newly installed machine. +* Playbooks installing an application together with software packages that are complex to configure (`apache_httpd`, `mariadb_server` and/or `php`) as a dependency are prefixed by `setup_`. Example: `setup_nextcloud` because Nextcloud also needs Apache httpd, MariaDB Server etc. +* The name of the playbook should be `- name: 'Playbook linuxfabrik.lfops.example'`. +* After creating a new playbook, document it in `playbooks/README.md` and add it in the `playbooks/all.yml`. +* Every run of the playbooks should be logged to `/var/log/linuxfabrik-lfops.log`. Include the following code in the playbook for this: + + ```yaml + pre_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-start.yml' + tags: + - 'always' + + roles: + + - role: 'example' + + post_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-end.yml' + tags: + - 'always' + ``` + + +### Roles + +* To understand/use a role, reading the README must be enough. +* Idempotency: Roles should not perform changes when applied a second time to the same system with the same parameters, and they should not report that changes have been done if they have not been done. 
More importantly, it should not damage an existing installation when applied a second time (even without tags). Example: + ```yaml + - name: 'Create new DBA "{{ mariadb_root["user"] }}" after a fresh installation' + ansible.builtin.command: 'mysql --unbuffered --execute "{{ item }}"' + loop: + - 'create user if not exists "{{ mariadb_root["user"] }}"@"%" identified by "{{ mariadb_root.password }}";' + - 'grant all privileges on *.* to "{{ mariadb_root["user"] }}"@"%" with grant option;' + - 'flush privileges;' + register: 'mariadb_new_dba_result' + changed_when: 'mariadb_new_dba_result["stderr"] is not match("ERROR \d+ \(28000\).*")' + failed_when: 'mariadb_new_dba_result["rc"] != 0 and mariadb_new_dba_result["stderr"] is not match("ERROR \d+ \(28000\).*")' + ``` +* If a role was run without tags, it should deliver a completely installed application (assuming it installs an application). +* Do not over-engineer the role during the development — it should fulfill its use case, but can grow and be improved on later. +* There should be one role per software application. If there are multiple versions of the software, e.g. PHP 7.1, 7.2, 7.3, etc., they all should be supported by a single role. +* Do not use role dependencies via `meta/main.yml`. Dependencies are handled in playbooks. +* Whenever the role requires a list as an input, use a list of dictionaries with `state: present/absent`. See "Combined Variables" below. +* Fail loudly. Avoid constructs that could suppress error messages, like `IfModule` in Apache HTTPd. This makes debugging and troubleshooting a lot easier. +* Do not support software versions that are EOL. +* When implementing a role for a new application, consider security, monitoring and backups. +* For mailing, use the `sendmail` utility, as it provides a consistent interface across distros. +* All user-facing information should be included in the README. Comments are intended for developers only. 
+* Avoid breaking changes as far as possible, but don't let them stand in the way of improvements. +* Document all changes in the [CHANGELOG.md](https://github.com/Linuxfabrik/lfops/blob/main/CHANGELOG.md) file. + + +#### Tasks + +* Always use the FQCN of the module. +* Always use meta modules wherever possible: + * `ansible.builtin.package` instead of `ansible.builtin.yum`, `ansible.builtin.dnf` or `ansible.builtin.apt` + * `ansible.builtin.service` instead of `ansible.builtin.systemd` +* Use the following modules in preference to their alternatives: + * `ansible.builtin.command` or `ansible.windows.win_command` over `ansible.builtin.shell` over `ansible.builtin.raw` + * `ansible.builtin.template` over `ansible.builtin.copy`, `ansible.builtin.lineinfile` or `ansible.builtin.blockinfile`. Templating the whole file leads to more consistent, deterministic, and expected results. +* Do not use `state: 'latest'` for the `ansible.builtin.package` module as this is not idempotent. Always use `state: 'present'`. +* Always use `delegate_to: 'localhost'` instead of `local_action`. +* Always provide `changed_when`, `creates`, or `removes` for `ansible.builtin.command` and `ansible.builtin.shell` tasks to ensure idempotency. Use `changed_when: false` for read-only commands. +* When deploying files with `ansible.builtin.template`, always set `backup`, `src`, `dest`, `owner`, `group`, and `mode`. +* Prefer `ansible.builtin.assert` over `ansible.builtin.fail` with `when` for validation checks. There is basically no technical difference; this guideline is only for consistency. +* Optionally add `ansible.builtin.debug` tasks for `__combined_var` variables so the user can see what the role will do. +* Split the service `enabled` and `state` into separate tasks. This is relevant for handlers that would restart the service, see "Handlers" below. 
+* Always check if SELinux is enabled before managing ports, file contexts, or booleans: + + ```yaml + - name: 'semanage port --add --type example_port_t --proto tcp 8080' + community.general.seport: + ports: 8080 + proto: 'tcp' + setype: 'example_port_t' + state: 'present' + when: + - 'ansible_facts["selinux"]["status"] != "disabled"' + ``` + + +#### Handlers + +* Use handlers in favor of `some_result is changed` if no `meta: flush_handlers` is required or if it would prevent duplicate code. +* Since handlers are global, prefix them with the role name to make sure the correct one is used. +* Use chained handlers (notify) when a validation step should precede the actual action, e.g. a config validation handler that notifies a restart handler. +* Handlers that restart or reload a service should skip execution when the service was just started (redundant) or when the user wants it stopped. For this, the result of the service state task has to be registered and checked. Example: + + ```yaml + - name: 'example: restart example' + ansible.builtin.service: + name: 'example' + state: 'restarted' + when: + - '__example__service_state_result is not changed' + - 'example__service_state != "stopped"' + ``` + + +#### Tags + +* Naming scheme: `role_name` and `role_name:section`. For example `apache_httpd` and `apache_httpd:vhosts`. +* The role should only do what one expects from the tag name. For example, the `mariadb:user` tag only manages MariaDB users. +* The README of a role should provide a list of the available tags and what they do. +* The tags should be set in the role itself. Do not set them in the playbook. +* Blocks/tasks that install base packages do not require tags such as `apache:pkgs`, `apache:setup` or `apache:install`. There is no real world scenario where it makes sense to only run the installation via Ansible, some configuration is always required. +* For each task, consider to which areas it belongs. A task will usually have multiple tags.
+ + +#### Variables + +* `./vars`: Variables that are not to be edited by users. +* `./defaults`: Default variables for the role, might be overridden by the user in the inventory. +* Document all user-facing variables in the README. Have a look at `roles/example/README.md` for the format. +* Do not set defaults for mandatory variables. +* Naming scheme: `___`, for example `apache_httpd__server_admin`. +* No need to invent new names, use the key-names from the config file (if possible), for example `redis__conf_maxmemory`. +* Prefix role-internal variables with `__`, for example `__example__sysconfig_path`. This makes it easy to determine which variables are user-facing and therefore should be in the README. +* Avoid embedding large lists or "magic values" directly into the playbook. Such static lists should be placed into the `vars/main.yml` file and named appropriately. +* If you need random but predictable/idempotent values, use the `inventory_hostname` as seed. Example for setting the minutes of an hour: `{{ 59 | random(seed=inventory_hostname) }}`. +* When guarding optional role variables (strings or lists) that may be undefined, use `is defined and my_var | length > 0`. This catches both undefined variables and empty values (e.g. `my_var: ''`). Bare `is defined` is fine for dict subkeys where presence alone is the signal (e.g. `item["cidr"] is defined`) or for result attributes (e.g. `result["failed"] is defined`). +* Any secrets (passwords, tokens etc.) should not be provided with default values in the role. It is important for a secure-by-default implementation to ensure that an environment is not vulnerable due to the production use of default secrets. Users must be forced to properly provide their own secret variable values. +* Always use the `ansible_facts` dictionary (e.g. `ansible_facts["os_family"]` instead of `ansible_os_family`). The old pre-2.5 "facts injected as separate variables" naming system will be deprecated in a future release of Ansible. 
+ + +##### Variable Validation with `argument_specs` + +Every role should include a `meta/argument_specs.yml` that declares all user-facing variables with their types. Ansible validates these automatically at role entry (before any tasks run), catching type mismatches and missing required variables without manual assert code. + +Include all variables documented in the README: mandatory variables, simple optional variables, and the `__host_var`/`__group_var` variants of injection variables. Do not include internal variables (`__dependent_var`, `__role_var`, `__combined_var`). + +Guidelines for `argument_specs`: + +* Use `required: true` for mandatory variables (replaces manual `assert` + `is defined` checks). +* Use `type` and `choices` where applicable. For injection variables where the default is `''` (empty string) but the actual value is a different type (e.g. int), use `type: 'raw'` to avoid rejecting the empty default. +* Omit `default` when the default in `defaults/main.yml` is a Jinja2 expression (e.g. `'{{ __example__conf_worker_threads }}'`), as `argument_specs` cannot evaluate it. +* Set `default` when it is a static value (e.g. `true`, `'started'`, `[]`). +* Sort entries alphabetically. + +Use `ansible.builtin.assert` in the tasks for validations that `argument_specs` cannot express: value ranges, regex patterns, or cross-variable dependencies. Tag the assert block with `always` so it runs even when other roles reference the validated variables. + +Have a look at the `example` role's `meta/argument_specs.yml` for a complete reference. + + +##### Combined Variables + +The goal of combined variables is that variables can be set in multiple places, and then merged in order to be used in the role. For example, the user can overwrite a specific configuration role default (`__role_var`) from their inventory (`__host_var` / `__group_var`). 
+ +Furthermore, other roles can also inject their sensible defaults via the `__dependent_var`, with a higher precedence than the role defaults, but lower than the user's inventory. + +To enable this behavior, you must define the `__combined_var` as follows: +```yaml +# for list of dictionaries +my_role__my_var__dependent_var: [] +my_role__my_var__group_var: [] +my_role__my_var__host_var: [] +my_role__my_var__role_var: [] +my_role__my_var__combined_var: '{{ ( + my_role__my_var__role_var + + my_role__my_var__dependent_var + + my_role__my_var__group_var + + my_role__my_var__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +# for simple values like strings, numbers or booleans +my_role__my_var__dependent_var: '' +my_role__my_var__group_var: '' +my_role__my_var__host_var: '' +my_role__my_var__role_var: '' +my_role__my_var__combined_var: '{{ + my_role__my_var__host_var if (my_role__my_var__host_var | string | length) else + my_role__my_var__group_var if (my_role__my_var__group_var | string | length) else + my_role__my_var__dependent_var if (my_role__my_var__dependent_var | string | length) else + my_role__my_var__role_var + }}' +``` + +The `__combined_var` will then be used in the tasks or templates of the role. + +The role must always implement some sort of `state` key, otherwise the user cannot unset a value defined in the defaults. Suppose the user wants to disable the default localhost vHost of the Apache HTTPd role: +```yaml +# defaults/main.yml +apache_httpd__vhosts__role_var: + - conf_server_name: 'localhost' + virtualhost_port: 80 + template: 'localhost' +``` + +Without the `state` key, the user has no way of achieving this, as they cannot remove previously defined elements from the list via the inventory. 
With the `state` key, the role knows it has to remove the vHost: +```yaml +# inventory +apache_httpd__vhosts__role_var: + - conf_server_name: 'localhost' + virtualhost_port: 80 + state: 'absent' +``` + +The handling of the state in the role should look something like this, assuming the default value for `state` is `present`: +```yaml +- name: 'Remove sites-available vHosts' + ansible.builtin.file: + path: '...' + state: 'absent' + loop: '{{ apache_httpd__vhosts__combined_var }}' + loop_control: + label: '{{ item["name"] }}' + when: + - 'item["state"] | d("present") == "absent"' + +- name: 'Create sites-available vHosts' + ansible.builtin.template: + src: '...' + dest: '...' + loop: '{{ apache_httpd__vhosts__combined_var }}' + loop_control: + label: '{{ item["name"] }}' + when: + - 'item["state"] | d("present") != "absent"' +``` + +Other times it is useful to generate a list of present and absent elements, for example when using `ansible.builtin.package`, as providing the packages as a list is much faster than looping through them. +```yaml +- name: 'Ensure PHP modules are absent' + ansible.builtin.package: + name: '{{ php__modules__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | map(attribute="name") }}' + state: 'absent' + +- name: 'Ensure PHP modules are present' + ansible.builtin.package: + name: '{{ (php__modules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | map(attribute="name")) + + (php__modules__combined_var | selectattr("state", "undefined") | map(attribute="name")) }}' + state: 'present' +``` + +Or in a Jinja2 template: +``` +{% for item in apache_tomcat__roles__combined_var if item['state'] | d('present') != 'absent' %} + +{% endfor %} +``` + +The vHost example above can be used to demonstrate another feature of `linuxfabrik.lfops.combine_lod`. Normally, the list items are combined based on a `unique_key` that should match, for example, the `name` key. 
However, this does not work with `conf_server_name` because you can have a vHost with the same `conf_server_name` for multiple ports. This means that the `unique_key` must be a *combination* of `conf_server_name` and `virtualhost_port`: +```yaml +apache_httpd__vhosts__combined_var: '{{ ( + apache_httpd__vhosts__role_var + + apache_httpd__vhosts__dependent_var + + apache_httpd__vhosts__group_var + + apache_httpd__vhosts__host_var + ) | linuxfabrik.lfops.combine_lod(unique_key=["conf_server_name", "virtualhost_port"]) + }}' +``` + +Note: + +* Have a look at `ansible-doc --type filter linuxfabrik.lfops.combine_lod`. +* Always use lists of dictionaries or simple values. Never use dictionaries directly, even though they allow overwriting of earlier elements, since one cannot template the keyname using Jinja2. This would prevent passing on of variables, especially in `__dependent_var` (for details have a look at ). +* Simple value `__combined_var` are always returned as strings. Convert them to integers when needed. + + +##### `skip_role` Variables in Playbooks + +The `playbook_name__role_name__skip_role` and `playbook_name__role_name__skip_role_injections` variables should provide the user an option to skip the role and the role's injections respectively. Have a look at the [README.md](./README.md#skipping-roles-in-a-playbook). 
+ +For this, we need to set the following two internal variables at the top of the playbook (between the `hosts:` and `roles:`): + +```yaml +vars: + + setup_icinga2_master__icingaweb2__skip_injections__internal_var: '{{ setup_icinga2_master__icingaweb2__skip_injections | d(setup_icinga2_master__icingaweb2__skip_role__internal_var) }}' + setup_icinga2_master__icingaweb2__skip_role__internal_var: '{{ setup_icinga2_master__icingaweb2__skip_role | d(false) }}' +``` + +Then use them with the roles as follows: + +```yaml +- role: 'linuxfabrik.lfops.icingaweb2' + when: + - 'not setup_icinga2_master__icingaweb2__skip_role__internal_var' + +- role: 'linuxfabrik.lfops.mariadb_server' + mariadb_server__databases__dependent_var: '{{ + (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__mariadb_server__databases__dependent_var, []) + }}' + mariadb_server__users__dependent_var: '{{ + (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__mariadb_server__users__dependent_var, []) + + }}' +``` + +Make sure to use the following format when passing multiple injections to avoid needing to flatten the list: + +```yaml +- role: 'linuxfabrik.lfops.icinga2_master' + icinga2_master__api_users__dependent_var: '{{ + (not setup_icinga2_master__icingadb__skip_injections__internal_var) | ternary(icingadb__icinga2_master__api_users__dependent_var, []) + + (not setup_icinga2_master__icingaweb2_module_director__skip_injections__internal_var) | ternary(icingaweb2_module_director__icinga2_master__api_users__dependent_var, []) + + (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__icinga2_master__api_users__dependent_var, []) + }}' +``` + + +#### Templates + +* Always use the `ansible.builtin.template` module instead of the `ansible.builtin.copy` module, even if there are currently no variables in the file. 
This makes it easier to extend later on, and allows the usage of an automatically generated header. +* Always create a backup file including the timestamp information (e.g. `keycloak.conf.23875.2025-02-14@15:19:16~`) so you can get the original file back if you somehow clobbered it incorrectly, by using `backup: true`. +* Always add the following to the top of templates, using the appropriate comment syntax: + ``` + # {{ ansible_managed }} + # 2021081601 + ``` +* Do not use `{{ template_run_date }}` inside the template. It is the date that the template was rendered, which is done during every Ansible run. This means that the task will always be changed, even if nothing else changed in the template, therefore breaking idempotency. +* Use the target path for the file in the `templates` folder, for example: `templates/etc/httpd/sites-available/default.conf.j2`. This makes it clear what the file is for, and avoids name collisions. +* Always use the `.j2` file extension for files in the `templates` folder. +* If deploying self-written scripts, copy them to `/usr/local/sbin` (due to SELinux). +* Keep templates as close to the original file as possible. This makes handling of rpmnew/rpmsave files easier. +* Add the following task after deploying a file that might get rpmnew or rpmsave files (or their Debian equivalents): + ```yaml + - name: 'Remove rpmnew / rpmsave (and Debian equivalents)' + ansible.builtin.include_role: + name: 'shared' + tasks_from: 'remove-rpmnew-rpmsave.yml' + vars: + shared__remove_rpmnew_rpmsave_config_file: '{{ item }}' + loop: '{{ repo_epel__repo_files }}' + ``` + + +#### OS-specific Variables + +If some variables need to be parameterized according to distribution and version (name of packages, configuration file paths, names of services), use OS-specific vars-files inside the `vars/` of your role.
+
+Variables with the same name are overridden by the files in `vars/` in order from least specific to most specific:
+
+* `os_family` covers a group of closely related platforms (e.g. `RedHat` covers `RHEL`, `CentOS`, `Fedora`)
+* `distribution` (e.g. `CentOS`) is more specific than os_family
+* `distribution_major_version` (e.g. `CentOS7`) is more specific than distribution
+* `distribution_version` (e.g. `CentOS7.9`) is the most specific
+
+To load the variables include the `platform-variables.yml` in the `tasks/main.yml` like this:
+```yaml
+- name: 'Set platform/version specific variables'
+  ansible.builtin.import_role:
+    name: 'shared'
+    tasks_from: 'platform-variables.yml'
+  tags:
+    - 'always'
+```
+
+Use the `always` tag so the variables are available even when running with a specific tag — other roles in the playbook may reference these variables.
+
+Note that since `vars/` are higher up in the [Ansible variable precedence](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence) than inventory variables we cannot directly define our defaults there. Instead, we either need to use the `my_role__my_var__role_var` (as these already support overwriting of `role_vars`; see "Combined Variables") or to define an internal variable (prefixed with `__`) in the `vars/` file:
+
+```yaml
+__my_role__my_simple_value: 'os-dependent default'
+```
+
+Then, in `defaults/main.yml`, we reference that internal variable as our public default:
+
+```yaml
+my_role__my_simple_value: '{{ __my_role__my_simple_value }}'
+```
+
+This allows the user to overwrite `my_role__my_simple_value` in their inventory.
+
+
+#### OS-specific Tasks
+
+In order to run only certain tasks based on the operating system platform, files need to be placed in `tasks/` with the filename of the supported "os family".
+ +Assume you have the following OS-specific task files, in order of most specific to least specific: + +* `tasks/CentOS7.4.yml` +* `tasks/CentOS7.yml` +* `tasks/RedHat.yml` +* `tasks/main.yml` + +Now, if you run Ansible against a *CentOS 7.9* host, for example, only these tasks are processed in the following order: + +1. `tasks/CentOS7.yml` +2. `tasks/main.yml` + +Include the OS-specific tasks in the `tasks/main.yml` like this: + +```yaml +- name: 'Perform platform/version specific tasks' + ansible.builtin.include_tasks: '{{ __task_file }}' + when: '__task_file | length > 0' + vars: + __task_file: '{{ lookup("ansible.builtin.first_found", __first_found_options) }}' + __first_found_options: + files: + - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_version"] }}.yml' + - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_major_version"] }}.yml' + - '{{ ansible_facts["distribution"] }}.yml' + - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_version"] }}.yml' + - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_major_version"] }}.yml' + - '{{ ansible_facts["os_family"] }}.yml' + paths: + - '{{ role_path }}/tasks' + skip: true + tags: + - 'always' +``` + +Make sure to set the tags directly on the `include_tasks` task, and not on a surrounding block. Setting it on a block causes the tag to be inherited to all tasks in that block, therefore also to included tasks. + + +### Handling of GPG Keys under Debian (APT Keyring) + +Adding a key to `/etc/apt/trusted.gpg.d` is insecure because it adds the key for all repositories. Therefore, `apt-key` (and the `ansible.builtin.apt_key` module) were deprecated. + +The new and secure workflow is: + +1. Store the GPG key in `/etc/apt/keyrings/`. The file extension **has** to match the file format. Use the `file` utility to determine the format: + * `PGP public key block Public-Key (old)`: ASCII-armored key. Use `.asc` extension. 
+ * `OpenPGP Public Key`: Binary GPG key. Use `.gpg` extension. + +2. Explicitly specify the path to the key in the `/etc/apt/sources.list.d/` file, for example: `deb [signed-by=/etc/apt/keyrings/icinga.asc] https://...`. + +Have a look at the [repo_icinga/tasks/Debian.yml](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga/tasks/Debian.yml) (ASCII armored key) or [repo_mariadb/tasks/Debian.yml](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb/tasks/Debian.yml) (binary GPG key) roles. + + +### Roles with Special Features + +Roles with special technical implementations and capabilities: + +* [apache_solr](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_solr): Installs the correct version of a dependent package (i.e. java) based on the solr version. + +* [github_project_createrepo](https://github.com/Linuxfabrik/lfops/tree/main/roles/github_project_createrepo): Sets FACL entries to allow both the webserver user and the github-project-createrepo user to access files. + +* [librenms](https://github.com/Linuxfabrik/lfops/tree/main/roles/librenms): Compiles and loads an SELinux module. + +* [mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mongodb): The role implements a `skip` state that completely ignores the entry. + +* [monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins): Implements install & maintenance as well as uninstall/remove on Linux and Windows. + +* [moodle](https://github.com/Linuxfabrik/lfops/tree/main/roles/moodle): Searches for the latest and most recent specific LTS version of itself on GitHub. + +* [nextcloud](https://github.com/Linuxfabrik/lfops/tree/main/roles/nextcloud): The role performs some tasks only on the very first run and never again after that. To do this, it creates a state file for itself so that it knows that it must skip certain tasks on subsequent runs. The role's README has a concise but informative "Tags" section. 
+ +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): Build list for ansible.builtin.packages based on state `present` and `absent`. Some Jinja templates use non-default strings marking the beginning/end of a block. + +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis): Gathers the installed version and deploys the corresponding config file. Configures Systemd with Unit File overrides. + +* [telegraf](https://github.com/Linuxfabrik/lfops/tree/main/roles/telegraf): Jinja templates use non-default strings marking the beginning/end of a print statement. + +* [wordpress](https://github.com/Linuxfabrik/lfops/tree/main/roles/wordpress): chmod: Sets file and folder permissions separately using `find`. + + +### Credits + +* +* +* diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 14b9e42c2..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,775 +0,0 @@ -Linuxfabrik's Ansible Development Guidelines -============================================ - -Rules of Thumb --------------- - -* Do not set defaults for mandatory variables. -* Always sort variables, tags, etc. -* All user-facing information should be in the README. Only use comments in other places for technical information. -* Keep templates as close to the original file as possible. This makes handling of rpmnew files easier. -* Prefer `item["subkey"]` to `item.subkey`. -* Always use the long parameter when using shell commands. - -Playbooks: - -* Each playbook must contain all dependencies to run flawlessly against a newly installed machine. -* Playbooks installing an application together with software packages that are complex to configure (``apache_httpd``, ``mariadb_server`` and/or ``php``) as a dependency are prefixed by ``setup_``. Example: ``setup_nextcloud`` because Nextcloud also needs Apache httpd, MariaDB Server etc. -* The name of the playbook should be ``- name: 'Playbook linuxfabrik.lfops.example'``. 
-* After creating a new playbook, add it in the ``playbooks/all.yml``. -* Every run of the playbooks should be logged to ``/var/log/linuxfabrik-lfops.log``. Include the following code in the playbook for this - - .. code-block:: yaml - - pre_tasks: - - ansible.builtin.import_role: - name: 'shared' - tasks_from: 'log-start.yml' - tags: - - 'always' - - roles: - - role: '...' - - post_tasks: - - ansible.builtin.import_role: - name: 'shared' - tasks_from: 'log-end.yml' - tags: - - 'always' - -Roles: - -* To understand/use a role, reading the readme and the defaults/main.yml must be enough. -* Idempotency: Roles should not perform changes when applied a second time to the same system with the same parameters, and it should not report that changes have been done if they have not been done. More importantly, it should not damage an existing installation when applied a second time (even without tags). Example: - - .. code-block:: yaml - - - name: Create new DBA '{{ mariadb_root.user }}' after a fresh installation - ansible.builtin.command: mysql --unbuffered --execute '{{ item }}' - with_items: - - create user if not exists "{{ mariadb_root.user }}"@"%" identified by "{{ mariadb_root.password }}"; - - grant all privileges on *.* to "{{ mariadb_root.user }}"@"%" with grant option; - - flush privileges; - register: mariadb_new_dba_result - changed_when: mariadb_new_dba_result.stderr is not match('ERROR \d+ \(28000\).*') - failed_when: mariadb_new_dba_result.rc != 0 and mariadb_new_dba_result.stderr is not match('ERROR \d+ \(28000\).*') - -* If a role was run without tags, it should deliver a completely installed application (assuming it installs an application). -* Do not over-engineer the role during the development - it should fulfill its use case, but can grow and be improved on later. -* The role should support the installation and configuration of multiple major versions of the software. For example, PHP 7.1, 7.2, 7.3 etc. should all be supported by a single role. 
Upgrades are either done manually or using Ansible, depending on the software and the implementation effort. -* Do not use role dependencies via ``meta/main.yml``. Dependencies make it harder to maintain a role, especially if it has many complex dependencies. -* Whenever the role requires a list as an input, use a list of dictionaries, preferably with `state: present/absent`. See "Injections" below. -* Avoid constructs that could suppress error messages like ``IfModule`` in Apache HTTPd. This makes debugging and troubleshooting a lot easier. - -Common: - -* Document all changes in the `CHANGELOG.md `_ file. -* Do not support and remove software versions that are EOL. -* When implementing a role for a new application, consider security, monitoring and backups. - - -Pre-Commit ----------- - -We are using `pre-commit `_ to make sure any changes adhere to the styling rules. Install `pre-commit `_, then configure it as a git-hook using: ``pre-commit install``. - - -Style Guide ------------ - -* Do not use ``---`` at the top of YAML files. It is only required if one specifies YAML directives above it. -* For YAML files, use the ``.yml`` extension. This is consistent with ``ansible-galaxy init``. -* In YAML files, use 2 spaces for indentation. Elsewhere prefer 4 spaces. -* Do not use special characters other than underscores in variable names. -* Try to name tasks after their respective shell commands. Exceptions are STIG tasks (they are too small, and too many to achieve a consistent naming). -* Split long Jinja2 expressions into multiple lines. -* Use the ``| bool`` filter when using bare variables (expressions consisting of just one variable reference without any operator). -* Use ``true`` / ``false`` instead of ``yes`` / ``no``, as they are actually part of YAML. -* Indent list items: - - Do: - - .. code-block:: yaml - - list1: - - item1 - - item2 - - Don't: - - .. 
code-block:: yaml - - list2: - - item1 - - item2 - list3: [ 'tag1', 'tag2' ] - -* Use RFC `5737 `_, `3849 `_, `7042 `_ and `2606 `_ in examples / documentation: - - * IPv4 Addresses: ``192.0.2.0/24``, ``198.51.100.0/24``, ``203.0.113.0/24`` - * IPv6 Addresses: ``2001:DB8::/32`` - * MAC Addresses: ``00-00-5E-00-53-00 through 00-00-5E-00-53-FF`` (unicast), ``01-00-5E-90-10-00 through 01-00-5E-90-10-FF`` (multicast) - * Domains: ``*.example``, ``example.com`` - - -Quotes ------- - -* We always quote strings and prefer single quotes over double quotes. The only time you should use double quotes is when they are nested within single quotes (e.g. Jinja map reference), or when your string requires escaping characters (e.g. using ``\n`` to represent a newline). -* If you must write a long string, we use the "folded scalar" (``>`` converts newlines to spaces, ``|`` keeps newlines) style and omit all special quoting. -* Do not quote booleans (e.g. ``true``/``false``). -* Do not quote numbers (e.g. ``42``). -* Do not quote octal numbers (e.g. ``0755``), use the ``0o`` prefix instead (e.g. ``0o0755``) -* Do not quote things referencing the local Ansible environment (e.g. boolean logic in ``when:`` statements or names of variables we are assigning values to). - -.. 
code-block:: yml - - # bad - - name: start robot named S1m0ne - service: - name: s1m0ne - state: started - enabled: true - become: yes - - # good - - name: 'start robot named S1m0ne' - ansible.builtin.service: - name: 's1m0ne' - state: 'started' - enabled: true - become: true - - # double quotes w/ nested single quotes - - name: 'start all robots' - ansible.builtin.service: - name: '{{ item["robot_name"] }}' - state: 'started' - enabled: true - with_items: '{{ robots }}' - become: true - - # double quotes to escape characters - - name 'print some text on two lines' - ansible.builtin.debug: - msg: "This text is on\ntwo lines" - - # folded scalar style - - name: 'robot infos' - ansible.builtin.debug: - msg: > - Robot {{ item['robot_name'] }} is {{ item['status'] }} and in {{ item['az'] }} - availability zone with a {{ item['curiosity_quotient'] }} curiosity quotient. - with_items: robots - - # folded scalar when the string has nested quotes already - - name: 'print some text' - ansible.builtin.debug: - msg: > - "I haven’t the slightest idea," said the Hatter. - - # don't quote booleans/numbers - - name: 'download google homepage' - ansible.builtin.get_url: - dest: '/tmp' - timeout: 60 - url: 'https://google.com' - validate_certs: true - - # variables example 1 - - name: 'set a variable' - ansible.builtin.set_fact: - my_var: 'test' - - # variables example 2 - - name: 'print my_var' - ansible.builtin.debug: - var: my_var - when: ansible_facts['os_family'] == 'Darwin' - - # variables example 3 - - name: 'set another variable' - ansible.builtin.set_fact: - my_second_var: '{{ my_var }}' - -Why? - -Even though strings are the default type for YAML, syntax highlighting looks better when explicitly set types. This also helps troubleshoot malformed strings when they should be properly escaped to have the desired effect. - - -Whitespace-Control in Jinja-Templates -------------------------------------- - -So called "Block Scalar Styles": - -* ``>``: Folded. 
Single line breaks within the string are replaced by a space. All trailing line breaks except one are removed. -* ``|``: Literal. Preserves every line break in the string. All trailing line breaks except one are removed. -* ``>-``, ``|-``: Strip the final line break and any trailing empty lines. -* ``>+``, ``|+``: Keep the final line break and any trailing empty lines. - -Any indention remains only for the first line of a multiline variable content. - -Insert whitespaces around Jinja filters like so: ``{{ my_var | d("my_default") }}``. - -See also: - -* https://yaml.org/spec/1.2.2/ -* https://jinja.palletsprojects.com/en/latest/templates/#whitespace-control - - - -Deploying files to the remote server ------------------------------------- - -* Always use the ``ansible.builtin.template`` module instead of the ``ansible.builtin.copy`` module, even if there are currently no variables in the file. This makes it easier to extend later on, and allows the usage of an automatically generated header. - -* Always create a backup file including the timestamp information (e.g. ``keycloak.conf.23875.2025-02-14@15:19:16~``) so you can get the original file back if you somehow clobbered it incorrectly, by using ``backup: true``. - -* Always add the following to the top of templates, using the appropriate comment syntax: - - .. code-block:: - - # {{ ansible_managed }} - # 2021081601 - -* Do not use ``{{ template_run_date }}``. Such a timestamp is the date of the last change to the template itself, but changes on every Ansible run. - -* Use the target path for the file in the ``template`` folder, for example: ``templates/etc/httpd/sites-available/default.conf.j2``. This makes it clear what the file is for, and avoids name collisions. - -* Always use the ``.j2`` file extension for files in the ``template`` folder. - -* If deploying self-written scripts, copy them to ``/usr/local/bin`` (due to SELinux). 
- -* Add the following task after deploying a file that might get rpmnew or rpmsave files (or their Debian equivalents): - -.. code-block:: yaml - - - name: 'Remove rpmnew / rpmsave (and Debian equivalents)' - ansible.builtin.include_role: - name: 'shared' - tasks_from: 'remove-rpmnew-rpmsave.yml' - vars: - shared__remove_rpmnew_rpmsave_config_file: '{{ item }}' - loop: '{{ repo_epel__repo_files }}' - - -Handlers --------- - -* Use handlers in favor to ``some_result is changed`` if no ``meta: flush_handlers`` is required or if it would prevent duplicate code. -* Since handlers are global, prefix them with the role name to make sure the correct one is used. - - -Modules -------- - -* Always use meta modules wherever possible: - - * ``ansible.builtin.package`` instead of ``ansible.builtin.yum``, ``ansible.builtin.dnf`` or ``ansible.builtin.apt`` - * ``ansible.builtin.service`` instead of ``ansible.builtin.systemd`` - -* Use some modules in preference to others: - - * ``ansible.builtin.command`` or ``ansible.windows.win_command`` over ``ansible.builtin.shell`` over ``ansible.builtin.raw`` - * ``ansible.builtin.template`` over ``ansible.builtin.copy`` if deploying files to the remote host (see above) - -* Always use ``state: 'present'`` for the ``ansible.builtin.package`` module - we are installing, not updating. -* Always use the FQCN of the module. -* ``ansible.builtin.uri`` module: if consuming a RESTful API, check if it is returning the required content - - .. code-block:: yaml - - tasks: - - ansible.builtin.uri: - url: 'http://api.example.com' - return_content: yes - register: apiresponse - - fail: - msg: 'version was not provided' - when: "version" not in apiresponse.content - - -Tags ----- - -* Naming scheme: ``role_name`` and ``role_name:section``, for example ``apache_httpd``, ``apache_httpd:vhosts``. -* The role should only do what one expects from the tag name. For example, the ``mariadb:user`` tag only manages MariaDB users. 
-* The README of a role should provide a list of the available tags and what they do. -* The tags should be set in the role itself. Do not set them in the playbook. -* Blocks/tasks that install base packages do not need a tag like ``apache:pkgs``, ``apache:setup`` or ``apache:install``. Why? There is no reason to just run the setup task by tag, you always need to do at least some configuration afterwards. -* For each task, consider to which areas it belongs. A task will usually have multiple tags. - - -Being OS-specific ------------------ - -OS-specific Tasks -~~~~~~~~~~~~~~~~~ - -To indicate on which operating system platforms the role can be used, (empty) files must be placed in ``tasks/`` which have the file name of the supported "os family". In these files you probably want to perform platform specific tasks once, for the most specific match. - -Assume you have the following OS-specific task files, in order of most specific to least specific: - -* ``tasks/CentOS7.4.yml`` -* ``tasks/CentOS7.yml`` -* ``tasks/RedHat.yml`` -* ``tasks/main.yml`` - -Now, if you run Ansible against a *CentOS 7.9* host, for example, only these tasks are processed in the following order: - -1. ``tasks/CentOS7.yml`` -2. ``tasks/main.yml`` - -Include the OS-specific tasks in the ``tasks/main.yml`` like this, and set the tags appropriately (should contain all tags of the possibly included task files): - -.. 
code-block:: yaml - - - name: 'Perform platform/version specific tasks' - ansible.builtin.include_tasks: '{{ __task_file }}' - when: '__task_file | length > 0' - vars: - __task_file: '{{ lookup("ansible.builtin.first_found", __first_found_options) }}' - __first_found_options: - files: - - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_version"] }}.yml' - - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_major_version"] }}.yml' - - '{{ ansible_facts["distribution"] }}.yml' - - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_version"] }}.yml' - - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_major_version"] }}.yml' - - '{{ ansible_facts["os_family"] }}.yml' - paths: - - '{{ role_path }}/tasks' - skip: true - tags: - - 'always' - - -Make sure to set the tags directly on the `include_tasks` task, and not on a surrounding block. Setting it on a block causes the tag to be inherited to all tasks in that block, therefore also to included tasks. See the following example for details: - -.. 
code-block:: yaml - - # RedHat.yml - - block: - - - name: 'task 1' - ansible.builtin.debug: - msg: 'task 1 {{ test__var1 }}' - - tags: - - 'test' - - 'test:one' - - - - block: - - - name: 'task 2' - ansible.builtin.debug: - msg: 'task 2 {{ test__var2 }}' - - tags: - - 'test' - - - # main.yml - # THIS WORKS: - - name: 'Perform platform/version specific tasks' - ansible.builtin.include_tasks: 'RedHat.yml' - tags: - - 'test' - - 'test:one' - - # without tags, whole playbook: - # task 1 one - # task 2 two - - # --tags test - # task 1 one - # task 2 two - - # --tags test:one - # task 1 one - - # --tags other - # no debug output, and include_tasks is not running - - - # THIS DOES NOT WORK: - - block: - - - name: 'Perform platform/version specific tasks' - ansible.builtin.include_tasks: 'RedHat.yml' - - tags: - - 'test' - - 'test:one' - - # without tags, whole playbook: - # task 1 one - # task 2 two - - # --tags test - # task 1 one - # task 2 two - - # --tags test:one - # task 1 one - # task 2 two # we don't want this task to run - - # --tags other - # no debug output, and include_tasks is not running - - -OS-specific Variables ---------------------- - -You normally use ``vars/main.yml`` (automatically included) to set variables used by your role. If some variables need to be parameterized according to distribution and version (name of packages, configuration file paths, names of services), use OS-specific vars-files. - -Variables with the same name are overridden by the files in ``vars/`` in order from least specific to most specific: - -* ``os_family`` covers a group of closely related platforms (e.g. ``RedHat`` covers ``RHEL``, ``CentOS``, ``Fedora``) -* ``distribution`` (e.g. ``CentOS``) is more specific than os_family -* ``distribution_major_version`` (e.g. ``CentOS7``) is more specific than distribution -* ``distribution_version`` (e.g. ``CentOS7.9``) is the most specific - -As always be aware of the fact that dicts and lists are completely replaced, not merged. 
- -Include the ``platform-variables.yml`` in the ``tasks/main.yml`` like this, and set the tags appropriately (should contain all tags tasks that could require the variables): - -.. code-block:: yaml - - - name: 'Set platform/version specific variables' - ansible.builtin.import_role: - name: 'shared' - tasks_from: 'platform-variables.yml' - tags: - - 'role' - - 'role:tag1' # for example, tag for a task which requires a platform specific varialbe - -For this task, it does not matter if the tags are set directly on the task itself or on a surrounding block. - - -OS-specific Filenames -~~~~~~~~~~~~~~~~~~~~~ - -For example: - -* AIX.yml -* Amazon.yml -* Archlinux.yml -* CentOS.yml -* CentOS6.yml -* CentOS7.yml -* CentOS7.3.yml -* Container Linux by CoreOS.yml -* Debian.yml -* Debian11.yml -* Fedora.yml -* Fedora33.yml -* FreeBSD.yml -* Gentoo.yml -* OpenBSD.yml -* openSUSE Leap15.yml -* RedHat.yml -* RedHat8.yml -* RedHat8.2.yml -* Suse.yml -* Ubuntu.yml -* Ubuntu20.yml - - -Variables ---------- - -* ``./vars``: Variables that are not to be edited by users -* ``./defaults``: Default variables for the role, might be overridden by the user using group_vars or host_vars -* Naming scheme: ``___``, for example ``apache_httpd__server_admin``. -* Every argument accepted from outside of the role should be given a default value in ``defaults/main.yml``. This allows a single place for users to look to see what inputs are expected. Avoid giving default values in vars/main.yml as such values are very high in the precedence order and are difficult for users and consumers of a role to override. -* No need to invent new names, use the key-names from the config file (if possible), for example ``redis__conf_maxmemory``. -* Avoid embedding large lists or "magic values" directly into the playbook. Such static lists should be placed into the ``vars/main.yml`` file and named appropriately. -* If you need random but predictable/idempotent values, use the ``inventory_hostname`` as seed. 
Example for setting the minutes of an hour: ``{{ 59 | random(seed=inventory_hostname) }}`` -* Any secrets (passwords, tokens etc.) should not be provided with default values in the role. The tasks should be implemented in such a way that any secrets required, but not provided, should result in task execution failure. It is important for a secure-by-default implementation to ensure that an environment is not vulnerable due to the production use of default secrets. Deployers must be forced to properly provide their own secret variable values. Example: - - .. code-block:: yaml - - assert: - that: - - 'stig__grub2_password is defined' - - 'stig__grub2_password | length' - quiet: true - fail_msg: 'Please define bootloader passwords for your hosts ("stig__grub2_password").'' - - -``skip_role``-Variables in Playbooks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``playbook_name__role_name__skip_role`` and ``playbook_name__role_name__skip_role_injections`` variables should provide the user an option to skip the role and the role's injections respectively. Have a look at the `README.md <./README.md#skipping-roles-in-a-playbook`_. - -For this, we need to set the following two internal variables at the top of the playbook (between the ``hosts:`` and ``roles:``): - -.. code-block:: yaml - - vars: - - setup_icinga2_master__icingaweb2__skip_injections__internal_var: '{{ setup_icinga2_master__icingaweb2__skip_injections | d(setup_icinga2_master__icingaweb2__skip_role__internal_var) }}' - setup_icinga2_master__icingaweb2__skip_role__internal_var: '{{ setup_icinga2_master__icingaweb2__skip_role | d(false) }}' - -Then use them with the roles as follows: - -.. 
code-block:: yaml - - - role: 'linuxfabrik.lfops.icingaweb2' - when: - - 'not setup_icinga2_master__icingaweb2__skip_role__internal_var' - - - role: 'linuxfabrik.lfops.mariadb_server' - mariadb_server__databases__dependent_var: '{{ - (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__mariadb_server__databases__dependent_var, []) - }}' - mariadb_server__users__dependent_var: '{{ - (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__mariadb_server__users__dependent_var, []) + - }}' - -Make sure to use the following format when passing multiple injections to avoid needing to flatten the list: - -.. code-block:: yaml - - - role: 'linuxfabrik.lfops.icinga2_master' - icinga2_master__api_users__dependent_var: '{{ - (not setup_icinga2_master__icingadb__skip_injections__internal_var) | ternary(icingadb__icinga2_master__api_users__dependent_var, []) + - (not setup_icinga2_master__icingaweb2_module_director__skip_injections__internal_var) | ternary(icingaweb2_module_director__icinga2_master__api_users__dependent_var, []) + - (not setup_icinga2_master__icingaweb2__skip_injections__internal_var) | ternary(icingaweb2__icinga2_master__api_users__dependent_var, []) - }}' - - -Injections -~~~~~~~~~~ - -The goal of injections is that variables can be set in multiple places, and then merged in order to be used in the role. -For example, the user can overwrite a specific configuration role default (``__role_var``) from their inventory (``__host_var`` / ``__group_var``). - -Furthermore, other roles can also inject their sensible defaults via the ``__dependent_var``, with a higher precedence than the role defaults, but lower than the user's inventory. - -To enable this behavior, you must define the ``__combined_var`` as follows: - -.. 
code-block:: yaml - - # for list of dictionaries - my_role__my_var__dependent_var: [] - my_role__my_var__group_var: [] - my_role__my_var__host_var: [] - my_role__my_var__role_var: [] - my_role__my_var__combined_var: '{{ ( - my_role__my_var__role_var + - my_role__my_var__dependent_var + - my_role__my_var__group_var + - my_role__my_var__host_var - ) | linuxfabrik.lfops.combine_lod - }}' - - # for simple values like strings, numbers or booleans - my_role__my_var__dependent_var: '' - my_role__my_var__group_var: '' - my_role__my_var__host_var: '' - my_role__my_var__role_var: '' - my_role__my_var__combined_var: '{{ - my_role__my_var__host_var if (my_role__my_var__host_var | string | length) else - my_role__my_var__group_var if (my_role__my_var__group_var | string | length) else - my_role__my_var__dependent_var if (my_role__my_var__dependent_var | string | length) else - my_role__my_var__role_var - }}' - -The ``__combined_var`` will then be used in the tasks or templates of the role. - -The role must always implement some sort of ``state`` key, otherwise the user cannot "unselect" a value defined in the defaults. Suppose the user wants to disable the default localhost vHost of the Apache HTTPd role: - -.. code-block:: yaml - - # defaults/main.yml - apache_httpd__vhosts__role_var: - - - conf_server_name: 'localhost' - virtualhost_port: 80 - template: 'localhost' - -Without the ``state`` key, the user has no way of achieving this, as they cannot remove previously defined elements from the list via the inventory. With the ``state`` key, the role knows it has to remove the vHost: - -.. code-block:: yaml - - # inventory - apache_httpd__vhosts__role_var: - - - conf_server_name: 'localhost' - virtualhost_port: 80 - state: 'absent' - -The handling of the state in the role can look something like this, assuming the default value for ``state`` is ``present``: - -.. code-block:: yaml - - - name: 'Remove sites-available vHosts' - ansible.builtin.file: - path: '...' 
- state: 'absent' - when: - - 'item["state"] | d("present") == "absent"' - loop: '{{ apache_httpd__vhosts__combined_var }}' - - - name: 'Create sites-available vHosts' - ansible.builtin.template: - src: '...' - dest: '...' - when: - - 'item["state"] | d("present") != "absent"' - loop: '{{ apache_httpd__vhosts__combined_var }}' - -Other times it is useful to generate a list of present and absent elements, for example when using ``ansible.builtin.package``, as providing the packages as a list is much faster than looping through them. - -.. code-block:: yaml - - - name: 'Ensure PHP modules are absent' - ansible.builtin.package: - name: '{{ php__modules__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | map(attribute="name") }}' - state: 'absent' - - - name: 'Ensure PHP modules are present' - ansible.builtin.package: - name: '{{ (php__modules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | map(attribute="name")) - + (php__modules__combined_var | selectattr("state", "undefined") | map(attribute="name")) }}' - state: 'present' - -Or in a Jinja2 template: - -.. code-block:: - - {% for item in apache_tomcat__roles__combined_var if item['state'] | d('present') != 'absent' %} - - {% endfor %} - -The vHost example above can be used to demonstrate another feature of ``linuxfabrik.lfops.combine_lod``. Normally, the list items are combined based on a ``unique_key`` that should match, for example, the ``name`` key. However, this does not work with ``conf_server_name`` because you can have a vHost with the same ``conf_server_name`` for multiple ports. This means that the ``unique_key`` must be a *combination* of ``conf_server_name`` and ``virtualhost_port``.: - -.. 
code-block:: yaml - - apache_httpd__vhosts__combined_var: '{{ ( - apache_httpd__vhosts__role_var + - apache_httpd__vhosts__dependent_var + - apache_httpd__vhosts__group_var + - apache_httpd__vhosts__host_var - ) | linuxfabrik.lfops.combine_lod(unique_key=["conf_server_name", "virtualhost_port"]) - }}' - -Note: - -* Have a look at ``ansible-doc --type filter linuxfabrik.lfops.combine_lod``. -* Always use lists of dictionaries or simple values. Never use dictionaries, even though they allow overwriting of earlier elemens, since one cannot template the keyname using Jinja2. This would prevent passing on of variables, especially in ``__dependent_var`` (for details have a look at https://docs.linuxfabrik.ch/software/ansible.html#besonderheiten-von-ansible). -* Simple value ``__combined_var`` are always returned as strings. Convert them to integers when using maths. - - -Ansible Facts / Magic Vars -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Always use ``ansible_facts``. Currently, Ansible recognizes both the new fact naming system (using ``ansible_facts``) and the old pre-2.5 "facts injected as separate variables" naming system. The old naming system will be deprecated in a future release of Ansible. - - -Documenting Variables -~~~~~~~~~~~~~~~~~~~~~ - -* Document variables in the ``README``. Have a look at ``python_venv/README.md`` on how this could look like. - - -Handling default values -~~~~~~~~~~~~~~~~~~~~~~~ - -1. A Jinja template contains vendor defaults using ``{{ variable | d('vendor-default-value') }}``. -2. Is overridden by ``defaults/main.yml`` using Linuxfabrik's best practice value ``variable: linuxfabrik-default-value``. -3. May be overriden by the customer by using a ``group_vars`` or ``host_vars`` definition. - - -Git Commits ------------ - -* | Since 2024-11-13, commit messages follow the `Conventional Commits specification `_ (``(): ``) - | Example: ``fix(roles/graylog_server): prevent warn on receiveBufferSize``. 
-* If there is an issue, the commit message must consist of the issue title followed by "(fix #issueno)", for example: ``fix(roles/graylog_server): prevent warn on receiveBufferSize (fix #341)``. -* For the first commit, use the message ``Add roles/`` or ``Add playbooks/``. - -```` must be one of the following: - -* chore: Changes to the build process or auxiliary tools and libraries such as documentation generation -* docs: Documentation only changes -* feat: A new feature -* fix: A bug fix -* perf: A code change that improves performance -* refactor: A code change that neither fixes a bug nor adds a feature -* style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) -* test: Adding missing tests - - -Releases --------- - -Releases are available on Ansible Galaxy. Changelogs have to be written according to https://keepachangelog.com/en/1.0.0/. - - -Handling of GPG Keys under Debian (APT Keyring) ------------------------------------------------ - -Adding a key to ``/etc/apt/trusted.gpg.d`` is insecure because it adds the key for all repositories. Therefore, ``apt-key`` (and the ``ansible.builtin.apt_key`` module) were deprecated. - -The new and secure workflow is: - -1. Store the GPG key in ``/etc/apt/keyrings/``. The file extension **has** to match the file format. Use the ``file`` utility to determine the format: - - * ``PGP public key block Public-Key (old)``: ASCII-armored key. Use ``.asc`` extension. - * ``OpenPGP Public Key``: Binary GPG key. Use ``.gpg`` extension. - -2. Explicitly specify the path to the key in the ``/etc/apt/sources.list.d/`` file, for example: ``deb [signed-by=/etc/apt/keyrings/icinga.asc] https://...``. - -Have a look at the `repo_icinga/tasks/Debian.yml `__ (ASCII armored key) or `repo_mariadb/tasks/Debian.yml `(binary GPG key) roles. 
- - -Roles with Special Features ---------------------------- - -Roles with special technical implementations and capabilities: - -* | `github_project_createrepo `_ - | Sets FACL entries to allow both the webserver user and the github-project-createrepo user to access files. - -* | `librenms `_ - | Compiles and loads an SELinux module. - -* | `mongodb `_ - | The role implements a ``skip`` state that completely ignores the entry. - -* | `monitoring_plugins `_ - | Implements install & maintenance as well as uninstall/remove on Linux and Windows. - -* | `moodle `_ - | Searches for the latest and most recent specific LTS version of itself on GitHub. - -* | `nextcloud `_ - | The role performs some tasks only on the very first run and never again after that. To do this, it creates a state file for itself so that it knows that it must skip certain tasks on subsequent runs. - | The role's README has a concise but informative "Tags" section. - -* | `php `_ - | Build list for ansible.builtin.packages based on state ``present`` and ``absent``. - | Some Jinja templates use non-default strings marking the beginning/end of a block. - -* | `redis `_ - | Gathers the installed version and deploys the corresponding config file. - | Configures Systemd with Unit File overrides. - -* | `telegraf `_ - | Jinja templates use non-default strings marking the beginning/end of a print statement. - -* | `wordpress `_ - | chmod: Sets file and folder permissions separately using ``find``. - - -Credits -------- - -* https://github.com/whitecloud/ansible-styleguide -* https://redhat-cop.github.io/automation-good-practices -* https://docs.openstack.org/openstack-ansible/latest/contributor/code-rules.html diff --git a/README.md b/README.md index e834c7aae..fc4cd3d32 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ made by Linuxfabrik

-
+
![GitHub Stars](https://img.shields.io/github/stars/linuxfabrik/lfops) ![License](https://img.shields.io/github/license/linuxfabrik/lfops) @@ -31,19 +31,26 @@ LFOps is a comprehensive Ansible Collection providing 145+ playbooks and 160+ ro * [Requirements](#requirements) * [Installation](#installation) + * [Using ansible-galaxy](#using-ansible-galaxy) * [Development Setup](#development-setup) + * [Mitogen](#mitogen) + * [Mitogen with ansible-playbook](#mitogen-with-ansible-playbook) * [Mitogen with ansible-navigator](#mitogen-with-ansible-navigator) * [Mitogen Compatibility](#mitogen-compatibility) + * [Using ansible-navigator](#using-ansible-navigator) * [Usage](#usage) + * [Running a Playbook](#running-a-playbook) * [Typical Workflow Example](#typical-workflow-example) * [The "all" Playbook](#the-all-playbook) * [Skipping Roles in a Playbook](#skipping-roles-in-a-playbook) + * [Configuration](#configuration) + * [Recommended ansible.cfg](#recommended-ansiblecfg) * [LFOps-wide Variables](#lfops-wide-variables) * [Bitwarden Integration](#bitwarden-integration) @@ -406,12 +413,7 @@ See `ansible-doc -t lookup linuxfabrik.lfops.bitwarden_item` for all options. ## Documentation -* **Ansible Roles**: Each role has its own README file in [`roles//`](roles/). -* **Ansible Plugins**: Available through `ansible-doc`. For example: `ansible-doc linuxfabrik.lfops.gpg_key`. -* **Changelog**: [CHANGELOG.md](CHANGELOG.md) -* **Compatibility**: [COMPATIBILITY.md](COMPATIBILITY.md) -* **Contributing**: [CONTRIBUTING.rst](CONTRIBUTING.rst) -* **Issue Tracker**: [GitHub Issues](https://github.com/Linuxfabrik/lfops/issues) +Full documentation is available at [linuxfabrik.github.io/lfops](https://linuxfabrik.github.io/lfops/). It is automatically built and deployed on every push to `main`. 
## Compatibility diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..d3a927239 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,101 @@ +# Security Policy + +## Supported Versions + +The latest version is currently supported and receives security updates. + + +## Reporting a Vulnerability + +We're extremely grateful for security researchers and users that report +vulnerabilities to us. All reports are thoroughly investigated by our team. + +Vulnerabilities are reported privately via GitHub's +[Security Advisories](https://docs.github.com/en/code-security/security-advisories) +feature. Please use the following link to submit your vulnerability: +[Report a vulnerability](https://github.com/Linuxfabrik/lfops/security/advisories/new) + +Please see +[Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) +for more information on how to submit a vulnerability using GitHub's interface. + + +### When Should I Report a Vulnerability? + +* You think you discovered a potential security vulnerability +* You are unsure how a vulnerability affects your system +* You think you discovered a vulnerability in another project that this project depends on + - For projects with their own vulnerability reporting and disclosure process, please report it directly there + +### When Should I NOT Report a Vulnerability? + +* You need help tuning your System for security +* You need help applying security related updates +* Your issue is not security related + + +### Vulnerability Response + +Each report is acknowledged and analyzed within 30 days. + +Any vulnerability information stays within this project and will not be disseminated to other projects +unless it is necessary to get the issue fixed. 
+
+As the security issue moves from triage, to identified fix, to release planning
+we will keep the reporter updated.
+
+
+## Security Release & Disclosure Process
+
+Security vulnerabilities should be handled quickly and sometimes privately. The
+primary goal of this process is to reduce the total time users are vulnerable
+to publicly known exploits.
+
+
+### Private Disclosure
+
+We ask that all suspected vulnerabilities be privately and responsibly
+disclosed via the [private disclosure process](#reporting-a-vulnerability)
+outlined above.
+
+Fixes may be developed and tested by our team in a
+[temporary private fork](https://docs.github.com/en/code-security/security-advisories/repository-security-advisories/collaborating-in-a-temporary-private-fork-to-resolve-a-repository-security-vulnerability)
+that is private from the general public if deemed necessary.
+
+
+### Public Disclosure
+
+Vulnerabilities are disclosed publicly as GitHub [Security
+Advisories](https://github.com/Linuxfabrik/lfops/security/advisories).
+
+A public disclosure date is negotiated by our team
+and the bug submitter. We prefer to fully disclose the bug as soon as possible
+once a user mitigation is available. It is reasonable to delay disclosure when
+the bug or the fix is not yet fully understood, the solution is not
+well-tested, or for vendor coordination. The timeframe for disclosure is from
+immediate (especially if it's already publicly known) to several weeks. For a
+vulnerability with a straightforward mitigation, we expect report date to
+disclosure date to be on the order of 30 days.
+
+If you know of a publicly disclosed security vulnerability please IMMEDIATELY
+[report the vulnerability](#reporting-a-vulnerability) to inform the team about the vulnerability so they may start the
+patch, release, and communication process.
+
+If possible the team will ask the person making the public report if
+the issue can be handled via a private disclosure process. 
If the reporter
+denies the request, the team will move swiftly with the fix and
+release process. In extreme cases you can ask GitHub to delete the issue but
+this generally isn't necessary and is unlikely to make a public disclosure less
+damaging.
+
+### Security Releases
+
+Once a fix is available it will be released and announced via the project on
+GitHub.
+Security releases will be announced and clearly marked as a security release and
+include information on which vulnerabilities were fixed. As much as possible
+this announcement should be actionable, and include any mitigating steps users
+can take prior to upgrading to a fixed version.
+
+Fixes will be applied in new releases and all fixed vulnerabilities will be noted in
+the [CHANGELOG](./CHANGELOG.md).
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 000000000..0dd183a25
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,28 @@
+# Linuxfabrik LFOps
+
+Ansible Collection of roles, playbooks and plugins for Linux-based cloud infrastructure. Covers OS hardening, MariaDB, Icinga2, Nextcloud, FreeIPA, KVM and more.
+
+Made by [Linuxfabrik](https://www.linuxfabrik.ch).
+
+
+## Overview
+
+LFOps is a comprehensive Ansible Collection providing 145+ playbooks and 160+ roles to bootstrap and manage Linux-based IT infrastructures. It covers the full server lifecycle from initial provisioning and hardening to application deployment, monitoring, and automated backups. LFOps supports RHEL 8/9/10, Debian, and Ubuntu.
+
+
+## Quick Start
+
+1. Install via Galaxy: `ansible-galaxy collection install linuxfabrik.lfops`
+2. Check [Compatibility](compatibility.md) for supported platforms
+3. Browse [Roles](roles/acme_sh.md) and [Playbooks](playbooks.md)
+4. 
See the main [README on GitHub](https://github.com/Linuxfabrik/lfops) for detailed setup instructions + + +## Links + +- [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/linuxfabrik/lfops/) +- [GitHub Repository](https://github.com/Linuxfabrik/lfops) +- [Report an Issue](https://github.com/Linuxfabrik/lfops/issues/new/choose) +- [Linuxfabrik Monitoring Plugins](https://linuxfabrik.github.io/monitoring-plugins/) +- [Linuxfabrik Lib (Python libraries)](https://linuxfabrik.github.io/lib/lib.html) +- [Linuxfabrik Website](https://www.linuxfabrik.ch) diff --git a/execution-environment.yml b/execution-environment.yml index 3d4c89ee6..7bd685fb2 100644 --- a/execution-environment.yml +++ b/execution-environment.yml @@ -41,6 +41,7 @@ dependencies: system: - 'git-core' # required for ansible.builtin.git module - 'rsync' # required for ansible.posix.synchronize module + - 'sshpass' # required for ssh connections with passwords, eg via `--ask-pass` additional_build_files: - src: 'linuxfabrik-lfops.tar.gz' diff --git a/playbooks/README.md b/playbooks/README.md new file mode 100644 index 000000000..201b80e52 --- /dev/null +++ b/playbooks/README.md @@ -0,0 +1,1366 @@ +# Playbooks + +This document lists all playbooks and the roles they call. Skip variables allow you to disable specific roles or dependencies within a playbook. Set them to `true` in your inventory to skip the corresponding role. + +All skip variables are booleans and default to `false` unless noted otherwise. + +For playbooks with role injections (typically `setup_*` playbooks), two types of skip variables are available: + +* `playbook__role__skip_role`: Skips the role entirely and disables its injections into other roles. +* `playbook__role__skip_injections`: Only disables the injections, the role itself still runs. + +See the [main README](../README.md#skipping-roles-in-a-playbook) for details. 
+ + +## acme_sh.yml + +Calls the following roles (in order): + +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) +* [acme_sh](https://github.com/Linuxfabrik/lfops/tree/main/roles/acme_sh) + + +## alternatives.yml + +Calls the following roles (in order): + +* [alternatives](https://github.com/Linuxfabrik/lfops/tree/main/roles/alternatives) + + +## ansible_init.yml + +Calls the following roles (in order): + +* [ansible_init](https://github.com/Linuxfabrik/lfops/tree/main/roles/ansible_init) + + +## apache_httpd.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `apache_httpd__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `apache_httpd__skip_repo_epel` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `apache_httpd__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `apache_httpd__skip_selinux` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `apache_httpd__skip_python` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd) + + +## apache_solr.yml + +Calls the following roles (in order): + +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) +* [apache_solr](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_solr) + + +## apache_tomcat.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `apache_tomcat__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) +* [apache_tomcat](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_tomcat) + + +## apps.yml + +Calls the following roles (in order): + +* 
[apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) + + +## at.yml + +Calls the following roles (in order): + +* [at](https://github.com/Linuxfabrik/lfops/tree/main/roles/at) + + +## audit.yml + +Calls the following roles (in order): + +* [audit](https://github.com/Linuxfabrik/lfops/tree/main/roles/audit) + + +## bind.yml + +Calls the following roles (in order): + +* [bind](https://github.com/Linuxfabrik/lfops/tree/main/roles/bind) + + +## blocky.yml + +Calls the following roles (in order): + +* [blocky](https://github.com/Linuxfabrik/lfops/tree/main/roles/blocky) + + +## borg_local.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `borg_local__skip_repo_baseos` +* [systemd_unit](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_unit): `borg_local__skip_systemd_unit` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `borg_local__skip_repo_epel` +* [borg_local](https://github.com/Linuxfabrik/lfops/tree/main/roles/borg_local) + + +## chrony.yml + +Calls the following roles (in order): + +* [chrony](https://github.com/Linuxfabrik/lfops/tree/main/roles/chrony) + + +## clamav.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `clamav__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `clamav__skip_repo_epel` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `clamav__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `clamav__skip_selinux` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `clamav__skip_python_venv` +* [clamav](https://github.com/Linuxfabrik/lfops/tree/main/roles/clamav) +* [fangfrisch](https://github.com/Linuxfabrik/lfops/tree/main/roles/fangfrisch): 
`clamav__skip_fangfrisch` + + +## cloud_init.yml + +Calls the following roles (in order): + +* [cloud_init](https://github.com/Linuxfabrik/lfops/tree/main/roles/cloud_init) + + +## cockpit.yml + +Calls the following roles (in order): + +* [cockpit](https://github.com/Linuxfabrik/lfops/tree/main/roles/cockpit) + + +## collabora.yml + +Calls the following roles (in order): + +* [repo_collabora_code](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora_code): `collabora__skip_repo_collabora_code` +* [repo_collabora](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora): `collabora__skip_repo_collabora` (default: `true`), `collabora__skip_repo_collabora_code` +* [collabora](https://github.com/Linuxfabrik/lfops/tree/main/roles/collabora) + + +## collect_rpmnew_rpmsave.yml + +Calls the following roles (in order): + +* [collect_rpmnew_rpmsave](https://github.com/Linuxfabrik/lfops/tree/main/roles/collect_rpmnew_rpmsave) + + +## coturn.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `coturn__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `coturn__skip_repo_epel` +* [coturn](https://github.com/Linuxfabrik/lfops/tree/main/roles/coturn) + + +## crypto_policy.yml + +Calls the following roles (in order): + +* [crypto_policy](https://github.com/Linuxfabrik/lfops/tree/main/roles/crypto_policy) + + +## dnf_makecache.yml + +Calls the following roles (in order): + +* [dnf_makecache](https://github.com/Linuxfabrik/lfops/tree/main/roles/dnf_makecache) + + +## dnf_versionlock.yml + +Calls the following roles (in order): + +* [dnf_versionlock](https://github.com/Linuxfabrik/lfops/tree/main/roles/dnf_versionlock) + + +## docker.yml + +Calls the following roles (in order): + +* [repo_docker](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_docker): `docker__skip_repo_docker` +* 
[docker](https://github.com/Linuxfabrik/lfops/tree/main/roles/docker) +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `docker__skip_kernel_settings` + + +## duplicity.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `duplicity__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `duplicity__skip_python_venv` +* [haveged](https://github.com/Linuxfabrik/lfops/tree/main/roles/haveged): `duplicity__skip_haveged` +* [duplicity](https://github.com/Linuxfabrik/lfops/tree/main/roles/duplicity) + + +## elastic_agent.yml + +Calls the following roles (in order): + +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `elastic_agent__skip_repo_elasticsearch` +* [elastic_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/elastic_agent) + + +## elastic_agent_fleet_server.yml + +Calls the following roles (in order): + +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `elastic_agent_fleet_server__skip_repo_elasticsearch` +* [elastic_agent_fleet_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/elastic_agent_fleet_server) + + +## elasticsearch.yml + +Calls the following roles (in order): + +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `elasticsearch__skip_kernel_settings` +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `elasticsearch__skip_repo_elasticsearch` +* [elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/elasticsearch) + + +## exoscale_vm.yml + +Calls the following roles (in order): + +* [exoscale_vm](https://github.com/Linuxfabrik/lfops/tree/main/roles/exoscale_vm) + + +## fail2ban.yml + +Calls the 
following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `fail2ban__skip_repo_baseos` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `fail2ban__skip_selinux` +* [sshd](https://github.com/Linuxfabrik/lfops/tree/main/roles/sshd): `fail2ban__skip_sshd` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `fail2ban__skip_repo_epel` +* [firewall](https://github.com/Linuxfabrik/lfops/tree/main/roles/firewall): `fail2ban__skip_firewall` +* [fail2ban](https://github.com/Linuxfabrik/lfops/tree/main/roles/fail2ban) + + +## fangfrisch.yml + +Calls the following roles (in order): + +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `clamav__skip_python_venv` +* [fangfrisch](https://github.com/Linuxfabrik/lfops/tree/main/roles/fangfrisch) + + +## ffmpeg.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `ffmpeg__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `ffmpeg__skip_repo_epel` +* [repo_rpmfusion](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_rpmfusion): `ffmpeg__skip_repo_rpmfusion` +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) + + +## files.yml + +Calls the following roles (in order): + +* [files](https://github.com/Linuxfabrik/lfops/tree/main/roles/files) + + +## firewall.yml + +Calls the following roles (in order): + +* [firewall](https://github.com/Linuxfabrik/lfops/tree/main/roles/firewall) + + +## freeipa_client.yml + +Calls the following roles (in order): + +* [freeipa_client](https://github.com/Linuxfabrik/lfops/tree/main/roles/freeipa_client) + + +## freeipa_server.yml + +Calls the following roles (in order): + +* 
[freeipa_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/freeipa_server) + + +## github_project_createrepo.yml + +Calls the following roles (in order): + +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps): `github_project_createrepo__skip_apps` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `github_project_createrepo__skip_python` +* [github_project_createrepo](https://github.com/Linuxfabrik/lfops/tree/main/roles/github_project_createrepo) + + +## gitlab_ce.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `gitlab_ce__skip_kernel_settings` +* [repo_gitlab_ce](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_gitlab_ce): `gitlab_ce__skip_repo_gitlab_ce` +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) +* [gitlab_ce](https://github.com/Linuxfabrik/lfops/tree/main/roles/gitlab_ce) + + +## glances.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `glances__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [glances](https://github.com/Linuxfabrik/lfops/tree/main/roles/glances) + + +## glpi_agent.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `glpi_agent__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `glpi_agent__skip_repo_epel` +* [glpi_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/glpi_agent) + + +## grafana.yml + +Calls the following roles (in order): + +* [repo_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_grafana): `grafana_server__skip_repo_grafana` +* 
[grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana) + + +## grafana_grizzly.yml + +Calls the following roles (in order): + +* [repo_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_grafana): `grafana_server__skip_repo_grafana` +* [grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana): `grafana_server__skip_grafana` +* [grafana_grizzly](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana_grizzly) + + +## haveged.yml + +Calls the following roles (in order): + +* [haveged](https://github.com/Linuxfabrik/lfops/tree/main/roles/haveged) + + +## hetzner_vm.yml + +Calls the following roles (in order): + +* [hetzner_vm](https://github.com/Linuxfabrik/lfops/tree/main/roles/hetzner_vm) + + +## hostname.yml + +Calls the following roles (in order): + +* [hostname](https://github.com/Linuxfabrik/lfops/tree/main/roles/hostname) + + +## icinga2_agent.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `icinga2_agent__skip_repo_baseos` +* [repo_icinga](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga): `icinga2_agent__skip_repo_icinga` +* [icinga2_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga2_agent) +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `icinga2_agent__skip_repo_epel` (default: `true`) +* [repo_monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_monitoring_plugins): `icinga2_agent__skip_repo_monitoring_plugins` +* [monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins): `icinga2_agent__skip_monitoring_plugins` + + +## icingaweb2.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `icingaweb2__skip_repo_baseos` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): 
`icingaweb2__skip_kernel_settings` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `icingaweb2__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `icingaweb2__skip_repo_mariadb` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `icingaweb2__skip_python` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `icingaweb2__skip_mariadb_server` +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `icingaweb2__skip_yum_utils` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `icingaweb2__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `icingaweb2__skip_php` +* [repo_icinga](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga): `icingaweb2__skip_repo_icinga` +* [icingaweb2](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2) + + +## icingaweb2_module_businessprocess.yml + +Calls the following roles (in order): + +* [icingaweb2_module_businessprocess](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_businessprocess) + + +## icingaweb2_module_company.yml + +Calls the following roles (in order): + +* [icingaweb2_module_company](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_company) + + +## icingaweb2_module_director.yml + +Calls the following roles (in order): + +* [icingaweb2_module_director](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_director) + + +## icingaweb2_module_doc.yml + +Calls the following roles (in order): + +* [icingaweb2_module_doc](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_doc) + + +## icingaweb2_module_incubator.yml + +Calls the following roles (in order): + +* 
[icingaweb2_module_incubator](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_incubator) + + +## icingaweb2_module_pdfexport.yml + +Calls the following roles (in order): + +* [icingaweb2_module_pdfexport](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_pdfexport) + + +## influxdb.yml + +Calls the following roles (in order): + +* [repo_influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_influxdb) +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `influxdb__skip_python_venv` +* [influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/influxdb) + + +## infomaniak_vm.yml + +Calls the following roles (in order): + +* [infomaniak_vm](https://github.com/Linuxfabrik/lfops/tree/main/roles/infomaniak_vm) + + +## kdump.yml + +Calls the following roles (in order): + +* [kdump](https://github.com/Linuxfabrik/lfops/tree/main/roles/kdump) + + +## keepalived.yml + +Calls the following roles (in order): + +* [keepalived](https://github.com/Linuxfabrik/lfops/tree/main/roles/keepalived) + + +## kernel_settings.yml + +Calls the following roles (in order): + +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings) + + +## kibana.yml + +Calls the following roles (in order): + +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `kibana__skip_repo_elasticsearch` +* [kibana](https://github.com/Linuxfabrik/lfops/tree/main/roles/kibana) + + +## kvm_host.yml + +Calls the following roles (in order): + +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `kvm_host__skip_python` +* [kvm_host](https://github.com/Linuxfabrik/lfops/tree/main/roles/kvm_host) + + +## libmaxminddb.yml + +Calls the following roles (in order): + +* [libmaxminddb](https://github.com/Linuxfabrik/lfops/tree/main/roles/libmaxminddb) + + +## libreoffice.yml + +Calls the following roles (in order): + +* 
[selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux) +* [libreoffice](https://github.com/Linuxfabrik/lfops/tree/main/roles/libreoffice) + + +## login.yml + +Calls the following roles (in order): + +* [login](https://github.com/Linuxfabrik/lfops/tree/main/roles/login) + + +## logrotate.yml + +Calls the following roles (in order): + +* [logrotate](https://github.com/Linuxfabrik/lfops/tree/main/roles/logrotate) + + +## logstash.yml + +Calls the following roles (in order): + +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `logstash__skip_repo_elasticsearch` +* [logstash](https://github.com/Linuxfabrik/lfops/tree/main/roles/logstash) + + +## lvm.yml + +Calls the following roles (in order): + +* [lvm](https://github.com/Linuxfabrik/lfops/tree/main/roles/lvm) + + +## mailto_root.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `mailto_root__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [postfix](https://github.com/Linuxfabrik/lfops/tree/main/roles/postfix): `mailto_root__skip_postfix` +* [mailx](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailx): `mailto_root__skip_mailx` +* [mailto_root](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailto_root) + + +## mailx.yml + +Calls the following roles (in order): + +* [mailx](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailx) + + +## mariadb_server.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `mariadb_server__skip_repo_baseos` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `mariadb_server__skip_kernel_settings` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `mariadb_server__skip_repo_epel` +* 
[repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `mariadb_server__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `mariadb_server__skip_repo_mariadb` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `mariadb_server__skip_python` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `mariadb_server__skip_policycoreutils` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server) + + +## maxmind_geoip.yml + +Calls the following roles (in order): + +* [maxmind_geoip](https://github.com/Linuxfabrik/lfops/tree/main/roles/maxmind_geoip) +* [systemd_unit](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_unit): `maxmind_geoip__skip_systemd_unit` + + +## minio_client.yml + +Calls the following roles (in order): + +* [minio_client](https://github.com/Linuxfabrik/lfops/tree/main/roles/minio_client) + + +## mirror.yml + +Calls the following roles (in order): + +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps): `mirror__skip_apps` +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `mirror__skip_yum_utils` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `mirror__skip_python` +* [mirror](https://github.com/Linuxfabrik/lfops/tree/main/roles/mirror) + + +## mod_maxminddb.yml + +Calls the following roles (in order): + +* [mod_maxminddb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mod_maxminddb) + + +## mongodb.yml + +Calls the following roles (in order): + +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `mongodb__skip_kernel_settings` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `mongodb__skip_python_venv` +* [repo_mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mongodb): `mongodb__skip_repo_mongodb` +* 
[mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mongodb) + + +## monitoring_plugins.yml + +Calls the following roles (in order): + +* [repo_monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_monitoring_plugins): `monitoring_plugins__skip_repo_monitoring_plugins` +* [monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins) + + +## monitoring_plugins_grafana_dashboards.yml + +Calls the following roles (in order): + +* [repo_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_grafana): `monitoring_plugins_grafana_dashboards__skip_repo_grafana` +* [grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana): `monitoring_plugins_grafana_dashboards__skip_grafana`, `monitoring_plugins_grafana_dashboards__skip_grafana_grizzly` +* [grafana_grizzly](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana_grizzly): `monitoring_plugins_grafana_dashboards__skip_grafana_grizzly` +* [monitoring_plugins_grafana_dashboards](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins_grafana_dashboards) + + +## motd.yml + +Calls the following roles (in order): + +* [motd](https://github.com/Linuxfabrik/lfops/tree/main/roles/motd) + + +## mount.yml + +Calls the following roles (in order): + +* [mount](https://github.com/Linuxfabrik/lfops/tree/main/roles/mount) + + +## network.yml + +Calls the following roles (in order): + +* [network](https://github.com/Linuxfabrik/lfops/tree/main/roles/network) + + +## nfs_client.yml + +Calls the following roles (in order): + +* [nfs_client](https://github.com/Linuxfabrik/lfops/tree/main/roles/nfs_client) + + +## nfs_server.yml + +Calls the following roles (in order): + +* [nfs_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/nfs_server) + + +## nodejs.yml + +Calls the following roles (in order): + +* [nodejs](https://github.com/Linuxfabrik/lfops/tree/main/roles/nodejs) + + +## objectstore_backup.yml + +Calls the 
following roles (in order): + +* [minio_client](https://github.com/Linuxfabrik/lfops/tree/main/roles/minio_client): `objectstore_backup__skip_minio_client` +* [objectstore_backup](https://github.com/Linuxfabrik/lfops/tree/main/roles/objectstore_backup) + + +## open_vm_tools.yml + +Calls the following roles (in order): + +* [open_vm_tools](https://github.com/Linuxfabrik/lfops/tree/main/roles/open_vm_tools) + + +## opensearch.yml + +Calls the following roles (in order): + +* [repo_opensearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_opensearch): `opensearch__skip_repo_opensearch` +* [opensearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/opensearch) + + +## openvpn_server.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `openvpn_server__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `openvpn_server__skip_repo_epel` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `openvpn_server__skip_policycoreutils` +* [openvpn_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/openvpn_server) + + +## php.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `php__skip_yum_utils` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `php__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php) + + +## podman_containers.yml + +Calls the following roles (in order): + +* [podman_containers](https://github.com/Linuxfabrik/lfops/tree/main/roles/podman_containers) + + +## policycoreutils.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) + + +## postfix.yml + +Calls the following roles (in order): + +* [mailx](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailx): 
`postfix__skip_mailx` +* [postfix](https://github.com/Linuxfabrik/lfops/tree/main/roles/postfix) + + +## postgresql_server.yml + +Calls the following roles (in order): + +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `postgresql_server__skip_python` +* [repo_postgresql](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_postgresql): `postgresql_server__skip_repo_postgresql` +* [postgresql_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/postgresql_server) + + +## proxysql.yml + +Calls the following roles (in order): + +* [repo_proxysql](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_proxysql): `proxysql__skip_repo_proxysql` +* [proxysql](https://github.com/Linuxfabrik/lfops/tree/main/roles/proxysql) + + +## python.yml + +Calls the following roles (in order): + +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python) + + +## python_venv.yml + +Calls the following roles (in order): + +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `python_venv__skip_python` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv) + + +## qemu_guest_agent.yml + +Calls the following roles (in order): + +* [qemu_guest_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/qemu_guest_agent) + + +## redis.yml + +Calls the following roles (in order): + +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `redis__skip_kernel_settings` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `redis__skip_repo_remi` +* [repo_redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_redis) +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis) + + +## repo_baseos.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos) + + +## repo_collabora.yml + +Calls the following roles (in order): + +* 
[repo_collabora](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora) + + +## repo_collabora_code.yml + +Calls the following roles (in order): + +* [repo_collabora_code](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora_code) + + +## repo_debian_base.yml + +Calls the following roles (in order): + +* [repo_debian_base](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_debian_base) + + +## repo_docker.yml + +Calls the following roles (in order): + +* [repo_docker](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_docker) + + +## repo_elasticsearch.yml + +Calls the following roles (in order): + +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch) + + +## repo_epel.yml + +Calls the following roles (in order): + +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) + + +## repo_gitlab_ce.yml + +Calls the following roles (in order): + +* [repo_gitlab_ce](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_gitlab_ce) + + +## repo_gitlab_runner.yml + +Calls the following roles (in order): + +* [repo_gitlab_runner](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_gitlab_runner) + + +## repo_grafana.yml + +Calls the following roles (in order): + +* [repo_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_grafana) + + +## repo_graylog.yml + +Calls the following roles (in order): + +* [repo_graylog](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_graylog) + + +## repo_icinga.yml + +Calls the following roles (in order): + +* [repo_icinga](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga) + + +## repo_influxdb.yml + +Calls the following roles (in order): + +* [repo_influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_influxdb) + + +## repo_mariadb.yml + +Calls the following roles (in order): + +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb) + + +## 
repo_mongodb.yml + +Calls the following roles (in order): + +* [repo_mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mongodb) + + +## repo_monitoring_plugins.yml + +Calls the following roles (in order): + +* [repo_monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_monitoring_plugins) + + +## repo_mydumper.yml + +Calls the following roles (in order): + +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper) + + +## repo_opensearch.yml + +Calls the following roles (in order): + +* [repo_opensearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_opensearch) + + +## repo_postgresql.yml + +Calls the following roles (in order): + +* [repo_postgresql](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_postgresql) + + +## repo_proxysql.yml + +Calls the following roles (in order): + +* [repo_proxysql](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_proxysql) + + +## repo_redis.yml + +Calls the following roles (in order): + +* [repo_redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_redis) + + +## repo_remi.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils) +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `repo_remi__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi) + + +## repo_rpmfusion.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `repo_rpmfusion__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `repo_rpmfusion__skip_repo_epel` +* [repo_rpmfusion](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_rpmfusion) + + +## repo_sury.yml + +Calls the following roles (in order): + +* 
[repo_sury](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_sury) + + +## rsyslog.yml + +Calls the following roles (in order): + +* [rsyslog](https://github.com/Linuxfabrik/lfops/tree/main/roles/rsyslog) + + +## selinux.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `selinux__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux) + + +## setup_basic.yml + +Calls the following roles (in order): + +* [network](https://github.com/Linuxfabrik/lfops/tree/main/roles/network): `setup_basic__skip_network` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_basic__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_basic__skip_repo_epel` +* [crypto_policy](https://github.com/Linuxfabrik/lfops/tree/main/roles/crypto_policy): `setup_basic__skip_crypto_policy` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_basic__skip_selinux` +* [systemd_journald](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_journald): `setup_basic__skip_systemd_journald` +* [hostname](https://github.com/Linuxfabrik/lfops/tree/main/roles/hostname): `setup_basic__skip_hostname` +* [timezone](https://github.com/Linuxfabrik/lfops/tree/main/roles/timezone): `setup_basic__skip_timezone` +* [logrotate](https://github.com/Linuxfabrik/lfops/tree/main/roles/logrotate): `setup_basic__skip_logrotate` +* [rsyslog](https://github.com/Linuxfabrik/lfops/tree/main/roles/rsyslog): `setup_basic__skip_rsyslog` +* [cloud_init](https://github.com/Linuxfabrik/lfops/tree/main/roles/cloud_init): `setup_basic__skip_cloud_init` +* [cockpit](https://github.com/Linuxfabrik/lfops/tree/main/roles/cockpit): `setup_basic__skip_cockpit` +* 
[dnf_makecache](https://github.com/Linuxfabrik/lfops/tree/main/roles/dnf_makecache): `setup_basic__skip_dnf_makecache` +* [kdump](https://github.com/Linuxfabrik/lfops/tree/main/roles/kdump): `setup_basic__skip_kdump` +* [chrony](https://github.com/Linuxfabrik/lfops/tree/main/roles/chrony): `setup_basic__skip_chrony` +* [motd](https://github.com/Linuxfabrik/lfops/tree/main/roles/motd): `setup_basic__skip_motd` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_basic__skip_python`, `setup_basic__skip_python_venv` +* [glances](https://github.com/Linuxfabrik/lfops/tree/main/roles/glances): `setup_basic__skip_glances` +* [tools](https://github.com/Linuxfabrik/lfops/tree/main/roles/tools): `setup_basic__skip_tools` +* [at](https://github.com/Linuxfabrik/lfops/tree/main/roles/at): `setup_basic__skip_at` +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `setup_basic__skip_yum_utils` +* [lvm](https://github.com/Linuxfabrik/lfops/tree/main/roles/lvm): `setup_basic__skip_lvm` +* [sshd](https://github.com/Linuxfabrik/lfops/tree/main/roles/sshd): `setup_basic__skip_sshd` +* [login](https://github.com/Linuxfabrik/lfops/tree/main/roles/login): `setup_basic__skip_login` +* [firewall](https://github.com/Linuxfabrik/lfops/tree/main/roles/firewall): `setup_basic__skip_firewall` +* [mailx](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailx): `setup_basic__skip_mailx` +* [postfix](https://github.com/Linuxfabrik/lfops/tree/main/roles/postfix): `setup_basic__skip_postfix` +* [mailto_root](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailto_root): `setup_basic__skip_mailto_root` +* [system_update](https://github.com/Linuxfabrik/lfops/tree/main/roles/system_update): `setup_basic__skip_system_update` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `setup_basic__skip_python_venv` +* [duplicity](https://github.com/Linuxfabrik/lfops/tree/main/roles/duplicity): 
`setup_basic__skip_duplicity` +* [repo_monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_monitoring_plugins): `setup_basic__skip_repo_monitoring_plugins` +* [monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins): `setup_basic__skip_monitoring_plugins` +* [repo_icinga](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga): `setup_basic__skip_repo_icinga` +* [icinga2_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga2_agent): `setup_basic__skip_icinga2_agent` + + +## setup_grav.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `grav__skip_yum_utils` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `grav__skip_policycoreutils` +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps): `grav__skip_apps` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `grav__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `grav__skip_php` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_grav__skip_selinux` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `grav__skip_python` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `grav__skip_apache_httpd` +* [grav](https://github.com/Linuxfabrik/lfops/tree/main/roles/grav) + + +## setup_graylog_datanode.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_graylog_datanode__skip_policycoreutils` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_graylog_datanode__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* 
[python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `setup_graylog_datanode__skip_python_venv` +* [repo_mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mongodb): `setup_graylog_datanode__skip_repo_mongodb` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_graylog_datanode__skip_kernel_settings` +* [mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mongodb): `setup_graylog_datanode__skip_mongodb` +* [repo_graylog](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_graylog): `setup_graylog_datanode__skip_repo_graylog` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_graylog_datanode__skip_selinux` +* [graylog_datanode](https://github.com/Linuxfabrik/lfops/tree/main/roles/graylog_datanode) + + +## setup_graylog_server.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_graylog_server__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_graylog_server__skip_selinux` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_graylog_server__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `setup_graylog_server__skip_python_venv` +* [repo_mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mongodb): `setup_graylog_server__skip_repo_mongodb` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_graylog_server__skip_kernel_settings` +* [mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mongodb): `setup_graylog_server__skip_mongodb` +* [repo_graylog](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_graylog): 
`setup_graylog_server__skip_repo_graylog` +* [graylog_datanode](https://github.com/Linuxfabrik/lfops/tree/main/roles/graylog_datanode): `setup_graylog_server__skip_graylog_datanode` +* [graylog_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/graylog_server) + + +## setup_icinga2_master.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_icinga2_master__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_icinga2_master__repo_epel__skip_role` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `setup_icinga2_master__repo_mariadb__skip_role` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_icinga2_master__python__skip_role` +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `setup_icinga2_master__repo_mydumper__skip_role` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_icinga2_master__policycoreutils__skip_role` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `setup_icinga2_master__mariadb_server__skip_role` +* [repo_influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_influxdb): `setup_icinga2_master__repo_influxdb__skip_role` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `setup_icinga2_master__python_venv__skip_role` +* [repo_icinga](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_icinga): `setup_icinga2_master__repo_icinga__skip_role` +* [icinga2_master](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga2_master): `setup_icinga2_master__icinga2_master__skip_role` +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `setup_icinga2_master__yum_utils__skip_role` +* 
[repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `setup_icinga2_master__repo_remi__skip_role` +* [repo_sury](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_sury): `setup_icinga2_master__repo_sury__skip_role` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `setup_icinga2_master__php__skip_role` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `setup_icinga2_master__apache_httpd__skip_role` +* [icingaweb2](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2): `setup_icinga2_master__icingaweb2__skip_role` +* [repo_monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_monitoring_plugins): `setup_icinga2_master__repo_monitoring_plugins__skip_role` +* [monitoring_plugins](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins): `setup_icinga2_master__monitoring_plugins__skip_role` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_icinga2_master__selinux__skip_role` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_icinga2_master__kernel_settings__skip_role` +* [repo_redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_redis): `setup_icinga2_master__repo_redis__skip_role` +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis): `setup_icinga2_master__redis__skip_role` +* [icingadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingadb): `setup_icinga2_master__icingadb__skip_role` +* [icinga_kubernetes](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga_kubernetes): `setup_icinga2_master__icinga_kubernetes__skip_role` (default: `true`) +* [icinga_kubernetes_web](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga_kubernetes_web): `setup_icinga2_master__icinga_kubernetes_web__skip_role` (default: `true`) +* [icingadb_web](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingadb_web): 
`setup_icinga2_master__icingadb_web__skip_role` +* [icingaweb2_module_doc](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_doc): `setup_icinga2_master__icingaweb2_module_doc__skip_role` +* [icingaweb2_module_fileshipper](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_fileshipper): `setup_icinga2_master__icingaweb2_module_fileshipper__skip_role` (default: `true`) +* [icingaweb2_module_generictts](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_generictts): `setup_icinga2_master__icingaweb2_module_generictts__skip_role` (default: `true`) +* [icingaweb2_module_x509](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_x509): `setup_icinga2_master__icingaweb2_module_x509__skip_role` (default: `true`) +* [icingaweb2_module_reporting](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_reporting): `setup_icinga2_master__icingaweb2_module_reporting__skip_role` (default: `true`) +* [icingaweb2_module_businessprocess](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_businessprocess): `setup_icinga2_master__icingaweb2_module_businessprocess__skip_role` (default: `true`) +* [icingaweb2_module_company](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_company): `setup_icinga2_master__icingaweb2_module_company__skip_role` (default: `true`) +* [icingaweb2_module_cube](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_cube): `setup_icinga2_master__icingaweb2_module_cube__skip_role` (default: `true`) +* [icingaweb2_theme_linuxfabrik](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_theme_linuxfabrik): `setup_icinga2_master__icingaweb2_theme_linuxfabrik__skip_role` +* [icingaweb2_module_incubator](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_incubator): `setup_icinga2_master__icingaweb2_module_incubator__skip_role` +* 
[icingaweb2_module_jira](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_jira): `setup_icinga2_master__icingaweb2_module_jira__skip_role` (default: `true`) +* [icingaweb2_module_pdfexport](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_pdfexport): `setup_icinga2_master__icingaweb2_module_pdfexport__skip_role` (default: `true`) +* [icingaweb2_module_vspheredb](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_vspheredb): `setup_icinga2_master__icingaweb2_module_vspheredb__skip_role` (default: `true`) +* [icingaweb2_module_director](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_director): `setup_icinga2_master__icingaweb2_module_director__skip_role` +* [repo_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_grafana): `setup_icinga2_master__repo_grafana__skip_role` +* [grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana): `setup_icinga2_master__grafana__skip_role` +* [grafana_grizzly](https://github.com/Linuxfabrik/lfops/tree/main/roles/grafana_grizzly): `setup_icinga2_master__grafana_grizzly__skip_role` +* [influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/influxdb): `setup_icinga2_master__influxdb__skip_role` +* [icingaweb2_module_grafana](https://github.com/Linuxfabrik/lfops/tree/main/roles/icingaweb2_module_grafana): `setup_icinga2_master__icingaweb2_module_grafana__skip_role` +* [monitoring_plugins_grafana_dashboards](https://github.com/Linuxfabrik/lfops/tree/main/roles/monitoring_plugins_grafana_dashboards): `setup_icinga2_master__monitoring_plugins_grafana_dashboards__skip_role` + + +## setup_keycloak.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_keycloak__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* 
[repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `keycloak__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `mariadb_server__skip_repo_mariadb` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `mariadb_server__skip_python` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `keycloak__skip_kernel_settings` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `keycloak__skip_policycoreutils` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server) +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) +* [keycloak](https://github.com/Linuxfabrik/lfops/tree/main/roles/keycloak) + + +## setup_librenms.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `librenms__skip_yum_utils` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_librenms__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `librenms__skip_repo_epel` +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `librenms__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `librenms__skip_repo_mariadb` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_librenms__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_librenms__skip_selinux` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `librenms__skip_python` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `librenms__skip_kernel_settings` +* 
[mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `librenms__skip_mariadb_server` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `librenms__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `librenms__skip_php` +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps): `librenms__skip_apps` +* [librenms](https://github.com/Linuxfabrik/lfops/tree/main/roles/librenms) +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `librenms__skip_apache_httpd` + + +## setup_mastodon.yml + +Calls the following roles (in order): + +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_mastodon__skip_python` +* [repo_postgresql](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_postgresql): `setup_mastodon__skip_repo_postgresql` +* [postgresql_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/postgresql_server): `setup_mastodon__skip_postgresql_server` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_mastodon__skip_kernel_settings` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `setup_mastodon__skip_repo_remi` +* [repo_redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_redis) +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis): `setup_mastodon__skip_redis` +* [repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch): `setup_mastodon__skip_repo_elasticsearch` +* [elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/elasticsearch): `setup_mastodon__skip_elasticsearch` +* [login](https://github.com/Linuxfabrik/lfops/tree/main/roles/login): `setup_mastodon__skip_login` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_mastodon__skip_repo_baseos` +* 
[repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_mastodon__skip_repo_epel` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_mastodon__skip_policycoreutils` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_mastodon__skip_selinux` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `setup_mastodon__skip_apache_httpd` +* [mastodon](https://github.com/Linuxfabrik/lfops/tree/main/roles/mastodon) + + +## setup_moodle.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_moodle__skip_policycoreutils` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_moodle__skip_kernel_settings` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_moodle__skip_python` +* [apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps): `setup_moodle__skip_apps` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `setup_moodle__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `setup_moodle__skip_php` +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis): `setup_moodle__skip_redis` +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `setup_moodle__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `setup_moodle__skip_repo_mariadb` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `setup_moodle__skip_mariadb_server` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `setup_moodle__skip_apache_httpd` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_moodle__skip_selinux` +* 
[moodle](https://github.com/Linuxfabrik/lfops/tree/main/roles/moodle) + + +## setup_nextcloud.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `setup_nextcloud__skip_yum_utils` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_nextcloud__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_nextcloud__skip_repo_epel` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_nextcloud__skip_policycoreutils` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_nextcloud__skip_python` +* [fail2ban](https://github.com/Linuxfabrik/lfops/tree/main/roles/fail2ban): `setup_nextcloud__skip_fail2ban` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_nextcloud__skip_kernel_settings` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `setup_nextcloud__skip_apache_httpd` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `setup_nextcloud__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `setup_nextcloud__skip_php` +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `setup_nextcloud__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `setup_nextcloud__skip_repo_mariadb` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `setup_nextcloud__skip_mariadb_server` +* [redis](https://github.com/Linuxfabrik/lfops/tree/main/roles/redis): `setup_nextcloud__skip_redis` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_nextcloud__skip_selinux` +* [systemd_unit](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_unit): 
`nextcloud__skip_systemd_unit` +* [nextcloud](https://github.com/Linuxfabrik/lfops/tree/main/roles/nextcloud) +* [repo_collabora_code](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora_code): `setup_nextcloud__skip_repo_collabora_code` +* [repo_collabora](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_collabora): `setup_nextcloud__skip_repo_collabora` (default: `true`), `setup_nextcloud__skip_repo_collabora_code` +* [collabora](https://github.com/Linuxfabrik/lfops/tree/main/roles/collabora): `setup_nextcloud__skip_collabora` +* [coturn](https://github.com/Linuxfabrik/lfops/tree/main/roles/coturn): `setup_nextcloud__skip_coturn` +* [minio_client](https://github.com/Linuxfabrik/lfops/tree/main/roles/minio_client): `setup_nextcloud__skip_minio_client` +* [objectstore_backup](https://github.com/Linuxfabrik/lfops/tree/main/roles/objectstore_backup): `setup_nextcloud__skip_objectstore_backup` +* [icinga2_agent](https://github.com/Linuxfabrik/lfops/tree/main/roles/icinga2_agent) + + +## setup_rocketchat.yml + +Calls the following roles (in order): + +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_rocketchat__skip_kernel_settings` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_rocketchat__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_rocketchat__skip_repo_epel` +* [python_venv](https://github.com/Linuxfabrik/lfops/tree/main/roles/python_venv): `setup_rocketchat__skip_python_venv` +* [repo_mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mongodb): `setup_rocketchat__skip_repo_mongodb` +* [mongodb](https://github.com/Linuxfabrik/lfops/tree/main/roles/mongodb): `setup_rocketchat__skip_mongodb` +* [login](https://github.com/Linuxfabrik/lfops/tree/main/roles/login): `setup_rocketchat__skip_login` +* [rocketchat](https://github.com/Linuxfabrik/lfops/tree/main/roles/rocketchat) +* 
[podman_containers](https://github.com/Linuxfabrik/lfops/tree/main/roles/podman_containers): `setup_rocketchat__skip_podman_containers` + + +## setup_wordpress.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `setup_wordpress__skip_yum_utils` +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `setup_wordpress__skip_repo_baseos` +* [repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel): `setup_wordpress__skip_repo_epel` +* [repo_mydumper](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mydumper): `setup_wordpress__skip_repo_mydumper` +* [repo_mariadb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_mariadb): `setup_wordpress__skip_repo_mariadb` +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils): `setup_wordpress__skip_policycoreutils` +* [python](https://github.com/Linuxfabrik/lfops/tree/main/roles/python): `setup_wordpress__skip_python` +* [kernel_settings](https://github.com/Linuxfabrik/lfops/tree/main/roles/kernel_settings): `setup_wordpress__skip_kernel_settings` +* [mariadb_server](https://github.com/Linuxfabrik/lfops/tree/main/roles/mariadb_server): `setup_wordpress__skip_mariadb_server` +* [repo_remi](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_remi): `setup_wordpress__skip_repo_remi` +* [php](https://github.com/Linuxfabrik/lfops/tree/main/roles/php): `setup_wordpress__skip_php` +* [apache_httpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd): `setup_wordpress__skip_apache_httpd` +* [selinux](https://github.com/Linuxfabrik/lfops/tree/main/roles/selinux): `setup_wordpress__skip_selinux` +* [wordpress](https://github.com/Linuxfabrik/lfops/tree/main/roles/wordpress) + + +## shell.yml + +Calls the following roles (in order): + +* [shell](https://github.com/Linuxfabrik/lfops/tree/main/roles/shell) + + +## snmp.yml + +Calls the following roles 
(in order): + +* [snmp](https://github.com/Linuxfabrik/lfops/tree/main/roles/snmp) + + +## squid.yml + +Calls the following roles (in order): + +* [squid](https://github.com/Linuxfabrik/lfops/tree/main/roles/squid) + + +## sshd.yml + +Calls the following roles (in order): + +* [policycoreutils](https://github.com/Linuxfabrik/lfops/tree/main/roles/policycoreutils) +* [sshd](https://github.com/Linuxfabrik/lfops/tree/main/roles/sshd) + + +## system_update.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils): `system_update__skip_yum_utils` +* [at](https://github.com/Linuxfabrik/lfops/tree/main/roles/at) +* [mailx](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailx) +* [postfix](https://github.com/Linuxfabrik/lfops/tree/main/roles/postfix): `system_update__skip_postfix` +* [mailto_root](https://github.com/Linuxfabrik/lfops/tree/main/roles/mailto_root): `system_update__skip_mailto_root` +* [system_update](https://github.com/Linuxfabrik/lfops/tree/main/roles/system_update) + + +## systemd_journald.yml + +Calls the following roles (in order): + +* [systemd_journald](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_journald) + + +## systemd_unit.yml + +Calls the following roles (in order): + +* [systemd_unit](https://github.com/Linuxfabrik/lfops/tree/main/roles/systemd_unit) + + +## telegraf.yml + +Calls the following roles (in order): + +* [repo_influxdb](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_influxdb): `telegraf__skip_repo_influxdb` +* [telegraf](https://github.com/Linuxfabrik/lfops/tree/main/roles/telegraf) + + +## timezone.yml + +Calls the following roles (in order): + +* [timezone](https://github.com/Linuxfabrik/lfops/tree/main/roles/timezone) + + +## tools.yml + +Calls the following roles (in order): + +* [repo_baseos](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_baseos): `tools__skip_repo_baseos` +* 
[repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) +* [tools](https://github.com/Linuxfabrik/lfops/tree/main/roles/tools) + + +## unattended_upgrades.yml + +Calls the following roles (in order): + +* [unattended_upgrades](https://github.com/Linuxfabrik/lfops/tree/main/roles/unattended_upgrades) + + +## vsftpd.yml + +Calls the following roles (in order): + +* [vsftpd](https://github.com/Linuxfabrik/lfops/tree/main/roles/vsftpd) + + +## yum_utils.yml + +Calls the following roles (in order): + +* [yum_utils](https://github.com/Linuxfabrik/lfops/tree/main/roles/yum_utils) diff --git a/playbooks/example.yml b/playbooks/example.yml new file mode 100644 index 000000000..22941f232 --- /dev/null +++ b/playbooks/example.yml @@ -0,0 +1,27 @@ +- name: 'Playbook linuxfabrik.lfops.example' + hosts: + - 'lfops_example' + + pre_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-start.yml' + tags: + - 'always' + + + roles: + + - role: 'linuxfabrik.lfops.repo_example' + when: + - 'not example__skip_repo_example | d(false)' + + - role: 'linuxfabrik.lfops.example' + + + post_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-end.yml' + tags: + - 'always' diff --git a/playbooks/libreoffice.yml b/playbooks/libreoffice.yml index 13609d13e..c8fa25b0b 100644 --- a/playbooks/libreoffice.yml +++ b/playbooks/libreoffice.yml @@ -22,7 +22,7 @@ when: - 'ansible_facts["os_family"] == "RedHat"' - 'libreoffice__client_apache is defined' - - 'libreoffice__client_apache' + - 'libreoffice__client_apache | bool' - role: 'linuxfabrik.lfops.libreoffice' diff --git a/playbooks/setup_example.yml b/playbooks/setup_example.yml new file mode 100644 index 000000000..b16a78ac5 --- /dev/null +++ b/playbooks/setup_example.yml @@ -0,0 +1,83 @@ +# Setup playbook for an application with complex dependencies (webserver, database, etc.). +# Playbooks with such dependencies are prefixed by "setup_". 
+# +# skip_role and skip_injections variables: +# - setup_example__<role>__skip_role: skips the role entirely +# - setup_example__<role>__skip_injections: skips only the dependent_var injections +# (defaults to the value of skip_role if not set) + +- name: 'Playbook linuxfabrik.lfops.setup_example' + hosts: + - 'lfops_setup_example' + + vars: + + setup_example__mariadb_server__skip_injections__internal_var: '{{ setup_example__mariadb_server__skip_injections | d(setup_example__mariadb_server__skip_role__internal_var) }}' + setup_example__mariadb_server__skip_role__internal_var: '{{ setup_example__mariadb_server__skip_role | d(false) }}' + + pre_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-start.yml' + tags: + - 'always' + + + roles: + + # === Repositories === + - role: 'linuxfabrik.lfops.repo_baseos' + repo_baseos__crb_repo_enabled__dependent_var: '{{ + repo_epel__repo_baseos__crb_repo_enabled__dependent_var + }}' + when: + - 'ansible_facts["os_family"] == "RedHat"' + - 'not setup_example__skip_repo_baseos | d(false)' + + - role: 'linuxfabrik.lfops.repo_epel' + when: + - 'ansible_facts["os_family"] == "RedHat"' + - 'not setup_example__skip_repo_epel | d(false)' + + # === SELinux === + - role: 'linuxfabrik.lfops.policycoreutils' + when: + - 'ansible_facts["os_family"] == "RedHat"' + - 'not setup_example__skip_policycoreutils | d(false)' + + - role: 'linuxfabrik.lfops.selinux' + selinux__booleans__dependent_var: '{{ + example__selinux__booleans__dependent_var + }}' + when: + - 'ansible_facts["os_family"] == "RedHat"' + - 'not setup_example__skip_selinux | d(false)' + + # === Web server === + - role: 'linuxfabrik.lfops.apache_httpd' + apache_httpd__mods__dependent_var: '{{ example__apache_httpd__mods__dependent_var | d([]) }}' + apache_httpd__vhosts__dependent_var: '{{ example__apache_httpd__vhosts__dependent_var | d([]) }}' + when: + - 'not setup_example__skip_apache_httpd | d(false)' + + # === Database === + - role: 
'linuxfabrik.lfops.mariadb_server' + mariadb_server__databases__dependent_var: '{{ + (not setup_example__mariadb_server__skip_injections__internal_var) | ternary(example__mariadb_server__databases__dependent_var, []) + }}' + mariadb_server__users__dependent_var: '{{ + (not setup_example__mariadb_server__skip_injections__internal_var) | ternary(example__mariadb_server__users__dependent_var, []) + }}' + when: + - 'not setup_example__mariadb_server__skip_role__internal_var' + + # === Application === + - role: 'linuxfabrik.lfops.example' + + + post_tasks: + - ansible.builtin.import_role: + name: 'shared' + tasks_from: 'log-end.yml' + tags: + - 'always' diff --git a/plugins/lookup/bitwarden_item.py b/plugins/lookup/bitwarden_item.py index 996fc7dc7..c95e3ea7c 100644 --- a/plugins/lookup/bitwarden_item.py +++ b/plugins/lookup/bitwarden_item.py @@ -277,8 +277,6 @@ sample: 'root' ''' -import time - from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display @@ -290,9 +288,6 @@ # https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#developing-lookup-plugins # inspired by the lookup plugins lastpass (same topic) and redis (more modern) -SYNC_INTERVAL = 60 # seconds -SYNC_TIMESTAMP_FILE = '/tmp/lfops_bitwarden_sync_time' - class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): @@ -302,23 +297,7 @@ def run(self, terms, variables=None, **kwargs): raise AnsibleError('Not logged into Bitwarden, or Bitwarden Vault is locked. 
Please run `bw login` and `bw unlock` first.') display.vvv('lfbwlp - run - bitwarden vault is unlocked') - timestamp = 0 - try: - with open(SYNC_TIMESTAMP_FILE, 'r') as f: - timestamp = float(f.read().strip()) - except (ValueError, IOError): - pass # we just sync if an error occurs - - if time.time() - timestamp >= SYNC_INTERVAL: - display.vvv('lfbwlp - run - syncing the vault') - bw.sync() - timestamp = time.time() - - try: - with open(SYNC_TIMESTAMP_FILE, 'w') as f: - f.write(str(timestamp)) - except IOError: - display.vvv('lfbwlp - run - failed to write last sync time') + bw.sync() ret = [] for term in terms: @@ -329,7 +308,7 @@ def run(self, terms, variables=None, **kwargs): hostname = term.get('hostname', None) id_ = term.get('id', None) name = term.get('name', None) - notes = term.get('notes', 'Automatically generated by Ansible.') + notes = term.get('notes', 'Generated by Ansible.') organization_id = term.get('organization_id', None) password_length = term.get('password_length', 60) password_choice = term.get('password_choice', '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') @@ -347,7 +326,7 @@ def run(self, terms, variables=None, **kwargs): result['username'] = result['login']['username'] result['password'] = result['login']['password'] ret.append(result) - break # done here, go to next term + continue # done here, go to next term else: # item not found by ID. if there is an ID given we expect it to exist raise AnsibleError('Item with id {} not found.'.format(id_)) diff --git a/plugins/module_utils/bitwarden.py b/plugins/module_utils/bitwarden.py index 6d083d9a8..bc96de434 100644 --- a/plugins/module_utils/bitwarden.py +++ b/plugins/module_utils/bitwarden.py @@ -6,32 +6,39 @@ from __future__ import absolute_import, division, print_function +# This module requires Python 3.8+ (secrets, f-strings with =, os.replace, json.JSONDecodeError). 
This should be fine since it will always run on localhost and the Ansible Controller has to be Python 3.9+ anyway + +import copy import email.encoders import email.mime.application import email.mime.multipart import email.mime.nonmultipart import email.parser -import email.utils +import email.policy import json import mimetypes import os import secrets -import urllib.parse +import tempfile +import time from urllib.error import HTTPError, URLError from ansible.module_utils.common.collections import Mapping -from ansible.module_utils.six import PY2, PY3, string_types -from ansible.module_utils.six.moves import cStringIO - -try: - import email.policy -except ImportError: - # Py2 - import email.generator +from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.urls import (ConnectionError, SSLValidationError, open_url) +try: + from ansible.utils.display import Display + display = Display() +except ImportError: + # When used from a module (not a lookup plugin), this code runs inside an AnsiballZ + # process on the remote host where ansible.utils.display is not available. 
+ class _NoopDisplay: + def vvv(self, msg, **kwargs): + pass + display = _NoopDisplay() def prepare_multipart_no_base64(fields): @@ -120,30 +127,15 @@ def prepare_multipart_no_base64(fields): m.attach(part) - if PY3: - # Ensure headers are not split over multiple lines - # The HTTP policy also uses CRLF by default - b_data = m.as_bytes(policy=email.policy.HTTP) - else: - # Py2 - # We cannot just call ``as_string`` since it provides no way - # to specify ``maxheaderlen`` - fp = cStringIO() # cStringIO seems to be required here - # Ensure headers are not split over multiple lines - g = email.generator.Generator(fp, maxheaderlen=0) - g.flatten(m) - # ``fix_eols`` switches from ``\n`` to ``\r\n`` - b_data = email.utils.fix_eols(fp.getvalue()) + # Ensure headers are not split over multiple lines + # The HTTP policy also uses CRLF by default + b_data = m.as_bytes(policy=email.policy.HTTP) del m headers, sep, b_content = b_data.partition(b'\r\n\r\n') del b_data - if PY3: - parser = email.parser.BytesHeaderParser().parsebytes - else: - # Py2 - parser = email.parser.HeaderParser().parsestr + parser = email.parser.BytesHeaderParser().parsebytes return ( parser(headers)['content-type'], # Message converts to native strings @@ -151,6 +143,11 @@ def prepare_multipart_no_base64(fields): ) +CACHE_DIR = os.environ.get('XDG_RUNTIME_DIR', '/tmp') +CACHE_FILE = os.path.join(CACHE_DIR, 'lfops_bitwarden_cache.json') +CACHE_VERSION = 2026032701 + + class BitwardenException(Exception): pass @@ -160,6 +157,8 @@ class Bitwarden(object): def __init__(self, hostname='127.0.0.1', port=8087): self._base_url = 'http://%s:%s' % (hostname, port) + self._cache = None + self._load_cache() def _api_call(self, url_path, method='GET', body=None, body_format='json'): url = '%s/%s' % (self._base_url, url_path) @@ -173,12 +172,13 @@ def _api_call(self, url_path, method='GET', body=None, body_format='json'): try: content_type, body = prepare_multipart_no_base64(body) except (TypeError, ValueError) as e: - 
BitwardenException('failed to parse body as form-multipart: %s' % to_native(e)) + raise BitwardenException('failed to parse body as form-multipart: %s' % to_native(e)) headers['Content-Type'] = content_type # mostly taken from ansible.builtin.url lookup plugin try: - response = open_url(url, method=method, data=body, headers=headers) + # increased the timeout since listing all items via `list/object/items` takes forever (13s for ~2500 items) + response = open_url(url, method=method, data=body, headers=headers, timeout=60) except HTTPError as e: raise BitwardenException("Received HTTP error for %s : %s" % (url, to_native(e))) except URLError as e: @@ -199,6 +199,64 @@ def _api_call(self, url_path, method='GET', body=None, body_format='json'): return result + def _load_cache(self): + """Load the cache from disk. If missing, unreadable, or invalid, start with an empty cache. + Freshness is handled by sync(). + """ + try: + with open(CACHE_FILE, 'r') as f: + data = json.load(f) + if data.get('version') == CACHE_VERSION: + self._cache = data + item_count = len(self._cache['items']) if self._cache['items'] is not None else 0 + display.vvv('lfbw - cache loaded from %s (%d items)' % (CACHE_FILE, item_count)) + return + except (IOError, OSError, ValueError, json.decoder.JSONDecodeError): + pass + self._cache = { + 'version': CACHE_VERSION, + 'sync_timestamp': 0, + 'items': None, + 'templates': {}, + } + display.vvv('lfbw - no valid cache found, starting fresh') + + + def _save_cache(self): + """Write the cache to disk atomically. 
+ """ + try: + fd, tmp_path = tempfile.mkstemp( + dir=os.path.dirname(CACHE_FILE), + prefix='.lfops_bw_cache_', + ) + try: + with os.fdopen(fd, 'w') as f: + json.dump(self._cache, f) + os.replace(tmp_path, CACHE_FILE) + display.vvv('lfbw - cache saved to %s' % (CACHE_FILE)) + except Exception: + os.unlink(tmp_path) + raise + except (IOError, OSError): + display.vvv('lfbw - failed to save cache to %s' % (CACHE_FILE)) + + + def _get_template(self, template_name): + """Return a template from cache, fetching from API on first use. + Templates are static API schema definitions that never change. + """ + if template_name not in self._cache['templates']: + display.vvv('lfbw - fetching template "%s" from API' % (template_name)) + result = self._api_call('object/template/%s' % (template_name)) + self._cache['templates'][template_name] = result['data']['template'] + self._save_cache() + else: + display.vvv('lfbw - using cached template "%s"' % (template_name)) + return copy.deepcopy(self._cache['templates'][template_name]) + + + @property def is_unlocked(self): """Check if the Bitwarden vault is unlocked. """ @@ -206,10 +264,20 @@ def is_unlocked(self): return result['data']['template']['status'] == 'unlocked' - def sync(self): - """Pull the latest vault data from server. + def sync(self, force=False, interval=60): + """Pull the latest vault data from server and repopulate the items cache. + Syncs only if the last sync was more than `interval` seconds ago, unless `force` is True. 
""" - return self._api_call('sync', method='POST') + if not force and time.time() - self._cache.get('sync_timestamp', 0) < interval: + display.vvv('lfbw - sync skipped, last sync was recent enough') + return + display.vvv('lfbw - syncing vault (force=%s)' % (force)) + self._api_call('sync', method='POST') + result = self._api_call('list/object/items') + self._cache['items'] = result['data']['data'] + self._cache['sync_timestamp'] = time.time() + display.vvv('lfbw - sync complete, cached %d items' % (len(self._cache['items']))) + self._save_cache() def get_items(self, name, username=None, folder_id=None, collection_id=None, organization_id=None): @@ -256,18 +324,11 @@ def get_items(self, name, username=None, folder_id=None, collection_id=None, org if isinstance(organization_id, str) and len(organization_id.strip()) == 0: organization_id = None - params = urllib.parse.urlencode( - { - 'search': name, - }, - quote_via=urllib.parse.quote, - ) - result = self._api_call('list/object/items?%s' % (params)) - - # make sure that all the given parameters exactly match the requested one, as `bw` is not that precise (only performs a search) - # we are not using the filter parameters of the `bw` utility, as they perform an OR operation, but we want AND + display.vvv('lfbw - searching cache for name="%s", username="%s"' % (name, username)) matching_items = [] - for item in result['data']['data']: + for item in self._cache['items']: + if item.get('type') != 1: + continue # skip non-login items (cards, secure notes, identities) if item['name'] == name \ and (item['login']['username'] == username) \ and (item.get('folderId') == folder_id) \ @@ -280,13 +341,20 @@ def get_items(self, name, username=None, folder_id=None, collection_id=None, org and (item.get('organizationId') == organization_id): matching_items.append(item) + display.vvv('lfbw - found %d matching item(s)' % (len(matching_items))) return matching_items def get_item_by_id(self, item_id): """Get an item by ID from 
Bitwarden. Returns the item or None. Throws an exception if the id leads to unambiguous results. """ - + display.vvv('lfbw - looking up item by id=%s' % (item_id)) + for item in self._cache['items']: + if item.get('id') == item_id: + display.vvv('lfbw - found item in cache') + return item + # fallback to API if not found in cache (item could have been created externally) + display.vvv('lfbw - item not in cache, falling back to API') result = self._api_call('object/item/%s' % (item_id)) return result['data'] @@ -319,9 +387,7 @@ def get_template_item_login_uri(self, uris): """ login_uris = [] if uris: - # To create uris, fetch the JSON structure for that. - result = self._api_call('object/template/item.login.uri') - template = result['data']['template'] + template = self._get_template('item.login.uri') for uri in uris: login_uri = template.copy() # make sure we are not editing the same object repeatedly login_uri['uri'] = uri @@ -342,9 +408,7 @@ def get_template_item_login(self, username=None, password=None, login_uris=None) "totp": "JBSWY3DPEHPK3PXP" } """ - # To create a login item, fetch the JSON structure for that. - result = self._api_call('object/template/item.login') - login = result['data']['template'] + login = self._get_template('item.login') login['password'] = password login['totp'] = '' login['uris'] = login_uris or [] @@ -374,10 +438,7 @@ def get_template_item(self, name, login=None, notes=None, organization_id=None, "reprompt": 0 } """ - # To create an item later on, fetch the item JSON structure, and fill in the appropriate - # values. - result = self._api_call('object/template/item') - item = result['data']['template'] + item = self._get_template('item') item['collectionIds'] = collection_ids item['folderId'] = folder_id item['login'] = login @@ -391,20 +452,32 @@ def get_template_item(self, name, login=None, notes=None, organization_id=None, def create_item(self, item): """Creates an item object in Bitwarden. 
""" + display.vvv('lfbw - creating item "%s"' % (item.get('name', ''))) result = self._api_call('object/item', method='POST', body=item) + self._cache['items'].append(result['data']) + self._save_cache() + time.sleep(1) return result['data'] def edit_item(self, item, item_id): """Edits an item object in Bitwarden. """ + display.vvv('lfbw - editing item %s' % (item_id)) result = self._api_call('object/item/%s' % (item_id), method='PUT', body=item) + for i, cached_item in enumerate(self._cache['items']): + if cached_item.get('id') == item_id: + self._cache['items'][i] = result['data'] + break + self._save_cache() + time.sleep(1) return result['data'] def add_attachment(self, item_id, attachment_path): """Adds the file at `attachment_path` to the item specified by `item_id` """ + display.vvv('lfbw - adding attachment "%s" to item %s' % (attachment_path, item_id)) body = { 'file': { @@ -412,6 +485,12 @@ def add_attachment(self, item_id, attachment_path): }, } result = self._api_call('attachment?itemId=%s' % (item_id), method='POST', body=body, body_format='form-multipart') + for i, cached_item in enumerate(self._cache['items']): + if cached_item.get('id') == item_id: + self._cache['items'][i] = result['data'] + break + self._save_cache() + time.sleep(1) return result @staticmethod diff --git a/plugins/module_utils/ipa_diff.py b/plugins/module_utils/ipa_diff.py new file mode 100644 index 000000000..2f308bdb4 --- /dev/null +++ b/plugins/module_utils/ipa_diff.py @@ -0,0 +1,107 @@ +# Temporary diff helpers for ansible-freeipa modules. +# Remove once https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. 
+ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible.module_utils._text import to_text + + +def _compare_key(arg, ipa_arg): + """Compare a single key's value using compare_args_ipa logic.""" + if isinstance(ipa_arg, (list, tuple)): + if not isinstance(arg, list): + arg = [arg] + if len(ipa_arg) != len(arg): + return False + if ipa_arg and arg and not ( + isinstance(ipa_arg[0], type(arg[0])) + or isinstance(arg[0], type(ipa_arg[0])) + ): + arg = [to_text(_a) for _a in arg] + try: + return set(arg) == set(ipa_arg) + except TypeError: + return arg == ipa_arg + return arg == ipa_arg + + +class IPADiffTracker(object): + """Track before/after state for Ansible --diff output.""" + + def __init__(self): + self._diffs = [] + + def build_diff(self): + """Return kwargs for exit_json (empty dict if no changes).""" + if not self._diffs: + return {} + return {"diff": self._diffs} + + def add_entry_diff(self, name, before, after): + """Record a diff entry for one IPA object.""" + if before == after: + return + self._diffs.append({ + "before_header": name, + "after_header": name, + "before": before, + "after": after, + }) + + +def gen_args_diff(args, res_find, ignore=None): + """Extract only changed keys from args vs res_find for diff output. + + Returns (before_dict, after_dict) containing only keys that differ. + Uses the same comparison logic as compare_args_ipa for consistency. + Single-element IPA lists are normalized to scalars for readability. 
+ """ + if not args: + return {}, {} + before = {} + after = {} + if ignore is None: + ignore = [] + for key in args: + if key in ignore: + continue + arg = args[key] + ipa_arg = res_find.get(key, [""]) + if not _compare_key(arg, ipa_arg): + # Normalize for display + _ipa = ipa_arg[0] if isinstance(ipa_arg, (list, tuple)) \ + and len(ipa_arg) == 1 else ipa_arg + _arg = arg[0] if isinstance(arg, (list, tuple)) \ + and len(arg) == 1 else arg + before[key] = _ipa + after[key] = _arg + return before, after + + +def gen_member_diff(member_key, add_list, del_list, current_list): + """Compute before/after for one member category. + + Returns (before_dict, after_dict) with member_key as key and sorted + lists as values. Returns ({}, {}) if no changes. + """ + if not add_list and not del_list: + return {}, {} + current = sorted(current_list or []) + desired = sorted( + [x for x in current if x not in (del_list or [])] + + (add_list or []) + ) + return {member_key: current}, {member_key: desired} + + +def merge_diffs(*diff_pairs): + """Merge multiple (before, after) tuples into a single pair.""" + merged_before = {} + merged_after = {} + for _before, _after in diff_pairs: + merged_before.update(_before) + merged_after.update(_after) + return merged_before, merged_after diff --git a/plugins/modules/bitwarden_item.py b/plugins/modules/bitwarden_item.py index ac0920d2d..99a5e4f09 100644 --- a/plugins/modules/bitwarden_item.py +++ b/plugins/modules/bitwarden_item.py @@ -18,8 +18,8 @@ - If no password item is found, a new item is created. Useful for automation. - If you do not specify a name or Bitwarden ID, it searches using the name/title. - If there is an existing Bitwarden item that differs from the given parameters, the item is updated, and the updated item is returned. - - If a search returns multiple entries, this lookup plugin throws an error, since it cannot decide which one to use. - - On success, this lookup plugin returns the complete Bitwarden item object. 
+ - If a search returns multiple entries, this module throws an error, since it cannot decide which one to use. + - On success, this module returns the complete Bitwarden item object. - If you don't specify a name/title for a password item, a name/title will be created automatically, using C(hostname - purpose) (for example "C(dbserver - MariaDB)") or just C(hostname) (for example "C(dbserver)", depending on what is provided). notes: @@ -288,7 +288,7 @@ def run_module(): module_args = dict( attachments=dict(type='list', required=False, default=[]), collection_id=dict(type='str', required=False, default=None), - folder_id=dict(type='list', required=False, default=None), + folder_id=dict(type='str', required=False, default=None), hostname=dict(type='str', required=False, default=None), id=dict(type='str', required=False, default=None), name=dict(type='str', required=False, default=None), diff --git a/plugins/modules/ipa_modules_LICENSE.txt b/plugins/modules/ipa_modules_LICENSE.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/plugins/modules/ipa_modules_LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/plugins/modules/ipagroup.py b/plugins/modules/ipagroup.py new file mode 100644 index 000000000..0730cabcf --- /dev/null +++ b/plugins/modules/ipagroup.py @@ -0,0 +1,1089 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + +DOCUMENTATION = """ +--- +module: ipagroup +short_description: Manage FreeIPA groups +description: Manage FreeIPA groups +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The group name + type: list + elements: str + required: false + aliases: ["cn"] + groups: + description: The list of group dicts (internally gid). + type: list + elements: dict + suboptions: + name: + description: The group (internally gid). 
+ type: str + required: true + aliases: ["cn"] + description: + description: The group description + type: str + required: false + gid: + description: The GID + type: int + required: false + aliases: ["gidnumber"] + nonposix: + description: Create as a non-POSIX group + required: false + type: bool + external: + description: Allow adding external non-IPA members from trusted domains + required: false + type: bool + posix: + description: + Create a non-POSIX group or change a non-POSIX to a posix group. + required: false + type: bool + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + user: + description: List of user names assigned to this group. + required: false + type: list + elements: str + group: + description: List of group names assigned to this group. + required: false + type: list + elements: str + service: + description: + - List of service names assigned to this group. + - Only usable with IPA versions 4.7 and up. + required: false + type: list + elements: str + membermanager_user: + description: + - List of member manager users assigned to this group. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + membermanager_group: + description: + - List of member manager groups assigned to this group. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + externalmember: + description: + - List of members of a trusted domain in DOM\\name or name@domain form. + Requires "server" context. + required: false + type: list + elements: str + aliases: ["ipaexternalmember", "external_member"] + idoverrideuser: + description: + - User ID overrides to add. Requires "server" context. 
+ required: false + type: list + elements: str + rename: + description: Rename the group object + required: false + type: str + aliases: ["new_name"] + description: + description: The group description + type: str + required: false + gid: + description: The GID + type: int + required: false + aliases: ["gidnumber"] + nonposix: + description: Create as a non-POSIX group + required: false + type: bool + external: + description: Allow adding external non-IPA members from trusted domains + required: false + type: bool + posix: + description: + Create a non-POSIX group or change a non-POSIX to a posix group. + required: false + type: bool + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + user: + description: List of user names assigned to this group. + required: false + type: list + elements: str + group: + description: List of group names assigned to this group. + required: false + type: list + elements: str + service: + description: + - List of service names assigned to this group. + - Only usable with IPA versions 4.7 and up. + required: false + type: list + elements: str + membermanager_user: + description: + - List of member manager users assigned to this group. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + membermanager_group: + description: + - List of member manager groups assigned to this group. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + externalmember: + description: + - List of members of a trusted domain in DOM\\name or name@domain form. + Requires "server" context. + required: false + type: list + elements: str + aliases: ["ipaexternalmember", "external_member"] + idoverrideuser: + description: + - User ID overrides to add. Requires "server" context. 
+ required: false + type: list + elements: str + action: + description: Work on group or member level + type: str + default: group + choices: ["member", "group"] + rename: + description: Rename the group object + required: false + type: str + aliases: ["new_name"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent", "renamed"] +author: + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Create group ops with gid 1234 +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: ops + gidnumber: 1234 + +# Create group sysops +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: sysops + +# Create group appops +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: appops + +# Create multiple groups ops, sysops +- ipagroup: + ipaadmin_password: SomeADMINpassword + groups: + - name: ops + gidnumber: 1234 + - name: sysops + +# Add user member pinky to group sysops +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: sysops + action: member + user: + - pinky + +# Add user member brain to group sysops +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: sysops + action: member + user: + - brain + +# Add group members sysops and appops to group ops +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: ops + group: + - sysops + - appops + +# Add user and group members to groups sysops and appops +- ipagroup: + ipaadmin_password: SomeADMINpassword + groups: + - name: sysops + user: + - user1 + - name: appops + group: + - group2 + +# Rename a group +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: oldname + rename: newestname + state: renamed + +# Create a non-POSIX group +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: nongroup + nonposix: yes + +# Turn a non-POSIX group into a POSIX group. +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: nonposix + posix: yes + +# Create an external group and add members from a trust to it. 
+# Module will fail if running under 'client' context. +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: extgroup + external: yes + externalmember: + - WINIPA\\Web Users + - WINIPA\\Developers + +# Create multiple non-POSIX and external groups +- ipagroup: + ipaadmin_password: SomeADMINpassword + groups: + - name: nongroup + nonposix: true + - name: extgroup + external: true + +# Remove groups sysops, appops, ops and nongroup +- ipagroup: + ipaadmin_password: SomeADMINpassword + name: sysops,appops,ops, nongroup + state: absent +""" + +RETURN = """ +""" + +from ansible.module_utils._text import to_text +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, \ + gen_add_list, gen_intersection_list, api_check_param, \ + convert_to_sid +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs +from ansible.module_utils import six +if six.PY3: + unicode = str +# Ensuring (adding) several groups with mixed types external, nonposix +# and posix require to have a fix in IPA: +# FreeIPA issue: https://pagure.io/freeipa/issue/9349 +# FreeIPA fix: https://github.com/freeipa/freeipa/pull/6741 +try: + from ipaserver.plugins import baseldap +except ImportError: + FIX_6741_DEEPCOPY_OBJECTCLASSES = False +else: + FIX_6741_DEEPCOPY_OBJECTCLASSES = \ + "deepcopy" in baseldap.LDAPObject.__json__.__code__.co_names + + +def find_group(module, name): + _args = { + "all": True, + "cn": name, + } + + _result = module.ipa_command("group_find", name, _args) + + if len(_result["result"]) > 1: + module.fail_json( + msg="There is more than one group '%s'" % (name)) + elif len(_result["result"]) == 1: + _res = _result["result"][0] + # The returned services are of type ipapython.kerberos.Principal, + # also services are not case sensitive. 
Therefore services are + # converted to lowercase strings to be able to do the comparison. + if "member_service" in _res: + _res["member_service"] = \ + [to_text(svc).lower() for svc in _res["member_service"]] + return _res + + return None + + +def gen_args(description, gid, nomembers): + _args = {} + if description is not None: + _args["description"] = description + if gid is not None: + _args["gidnumber"] = gid + if nomembers is not None: + _args["nomembers"] = nomembers + + return _args + + +def gen_member_args(user, group, service, externalmember, idoverrideuser): + _args = {} + if user is not None: + _args["member_user"] = user + if group is not None: + _args["member_group"] = group + if service is not None: + _args["member_service"] = service + if externalmember is not None: + _args["member_external"] = externalmember + if idoverrideuser is not None: + _args["member_idoverrideuser"] = idoverrideuser + + return _args + + +def check_parameters(module, state, action): + invalid = ["description", "gid", "posix", "nonposix", "external", + "nomembers"] + if action == "group": + if state == "present": + invalid = [] + elif state == "absent": + invalid.extend(["user", "group", "service", "externalmember"]) + if state == "renamed": + if action == "member": + module.fail_json( + msg="Action member can not be used with state: renamed.") + invalid.extend(["user", "group", "service", "externalmember"]) + else: + invalid.append("rename") + module.params_fail_used_invalid(invalid, state, action) + + +def is_external_group(res_find): + """Verify if the result group is an external group.""" + return res_find and 'ipaexternalgroup' in res_find['objectclass'] + + +def is_posix_group(res_find): + """Verify if the result group is an posix group.""" + return res_find and 'posixgroup' in res_find['objectclass'] + + +def check_objectclass_args(module, res_find, posix, external): + # Only a nonposix group can be changed to posix or external + + # A posix group can not be changed to 
nonposix or external + if is_posix_group(res_find): + if external is not None and external or posix is False: + module.fail_json( + msg="Cannot change `posix` group to `non-posix` or " + "`external`.") + # An external group can not be changed to nonposix or posix or nonexternal + if is_external_group(res_find): + if external is False or posix is not None: + module.fail_json( + msg="Cannot change `external` group to `posix` or " + "`non-posix`.") + + +def main(): + group_spec = dict( + # present + description=dict(type="str", default=None), + gid=dict(type="int", aliases=["gidnumber"], default=None), + nonposix=dict(required=False, type='bool', default=None), + external=dict(required=False, type='bool', default=None), + posix=dict(required=False, type='bool', default=None), + nomembers=dict(required=False, type='bool', default=None), + user=dict(required=False, type='list', elements="str", + default=None), + group=dict(required=False, type='list', elements="str", + default=None), + service=dict(required=False, type='list', elements="str", + default=None), + idoverrideuser=dict(required=False, type='list', elements="str", + default=None), + membermanager_user=dict(required=False, type='list', + elements="str", default=None), + membermanager_group=dict(required=False, type='list', + elements="str", default=None), + externalmember=dict(required=False, type='list', elements="str", + default=None, + aliases=[ + "ipaexternalmember", + "external_member" + ]), + rename=dict(type="str", required=False, default=None, + aliases=["new_name"]), + ) + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["cn"], + default=None, required=False), + groups=dict(type="list", + default=None, + options=dict( + # Here name is a simple string + name=dict(type="str", required=True, + aliases=["cn"]), + # Add group specific parameters + **group_spec + ), + elements='dict', + required=False), + # general + 
action=dict(type="str", default="group", + choices=["member", "group"]), + state=dict(type="str", default="present", + choices=["present", "absent", "renamed"]), + + # Add group specific parameters for simple use case + **group_spec + ), + # It does not make sense to set posix, nonposix or external at the + # same time + mutually_exclusive=[['posix', 'nonposix', 'external'], + ["name", "groups"]], + required_one_of=[["name", "groups"]], + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + groups = ansible_module.params_get("groups") + + # present + description = ansible_module.params_get("description") + gid = ansible_module.params_get("gid") + nonposix = ansible_module.params_get("nonposix") + external = ansible_module.params_get("external") + idoverrideuser = ansible_module.params_get("idoverrideuser") + posix = ansible_module.params_get("posix") + nomembers = ansible_module.params_get("nomembers") + user = ansible_module.params_get_lowercase("user") + group = ansible_module.params_get_lowercase("group") + # Services are not case sensitive + service = ansible_module.params_get_lowercase("service") + membermanager_user = ( + ansible_module.params_get_lowercase("membermanager_user")) + membermanager_group = ( + ansible_module.params_get_lowercase("membermanager_group")) + externalmember = ansible_module.params_get("externalmember") + # rename + rename = ansible_module.params_get("rename") + # state and action + action = ansible_module.params_get("action") + state = ansible_module.params_get("state") + + # Check parameters + + if (names is None or len(names) < 1) and \ + (groups is None or len(groups) < 1): + ansible_module.fail_json(msg="At least one name or groups is required") + + if state in ["present", "renamed"]: + if names is not None and len(names) != 1: + what = "renamed" if state == "renamed" else "added" + ansible_module.fail_json( + msg="Only one group 
can be %s at a time using 'name'." % what) + + check_parameters(ansible_module, state, action) + + if external is False: + ansible_module.fail_json( + msg="group can not be non-external") + + # Ensuring (adding) several groups with mixed types external, nonposix + # and posix require to have a fix in IPA: + # + # FreeIPA issue: https://pagure.io/freeipa/issue/9349 + # FreeIPA fix: https://github.com/freeipa/freeipa/pull/6741 + # + # The simple solution is to switch to client context for ensuring + # several groups simply if the user was not explicitly asking for + # the server context no matter if mixed types are used. + context = ansible_module.params_get("ipaapi_context") + if state == "present" and groups is not None and len(groups) > 1 \ + and not FIX_6741_DEEPCOPY_OBJECTCLASSES: + if context is None: + context = "client" + ansible_module.debug( + "Switching to client context due to an unfixed issue in " + "your IPA version: https://pagure.io/freeipa/issue/9349") + elif context == "server": + ansible_module.fail_json( + msg="Ensuring several groups with server context is not " + "supported by your IPA version: " + "https://pagure.io/freeipa/issue/9349") + + if ( + (externalmember is not None + or idoverrideuser is not None) + and context == "client" + ): + ansible_module.fail_json( + msg="Cannot use externalmember in client context." 
+ ) + + # Use groups if names is None + if groups is not None: + names = groups + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # If nonposix is used, set posix as not nonposix + if nonposix is not None: + posix = not nonposix + + # Connect to IPA API + with ansible_module.ipa_connect(context=context): + + has_add_member_service = ansible_module.ipa_command_param_exists( + "group_add_member", "service") + if service is not None and not has_add_member_service: + ansible_module.fail_json( + msg="Managing a service as part of a group is not supported " + "by your IPA version") + + has_add_membermanager = ansible_module.ipa_command_exists( + "group_add_member_manager") + if ((membermanager_user is not None or + membermanager_group is not None) and not has_add_membermanager): + ansible_module.fail_json( + msg="Managing a membermanager user or group is not supported " + "by your IPA version" + ) + + has_idoverrideuser = api_check_param( + "group_add_member", "idoverrideuser") + if idoverrideuser is not None and not has_idoverrideuser: + ansible_module.fail_json( + msg="Managing a idoverrideuser as part of a group is not " + "supported by your IPA version") + + commands = [] + group_set = set() + + for group_name in names: + if isinstance(group_name, dict): + name = group_name.get("name") + if name in group_set: + ansible_module.fail_json( + msg="group '%s' is used more than once" % name) + group_set.add(name) + # present + description = group_name.get("description") + gid = group_name.get("gid") + nonposix = group_name.get("nonposix") + external = group_name.get("external") + idoverrideuser = group_name.get("idoverrideuser") + posix = group_name.get("posix") + # Check mutually exclusive condition for multiple groups + # creation. It's not possible to check it with + # `mutually_exclusive` argument in `IPAAnsibleModule` class + # because it accepts only (list[str] or list[list[str]]). 
Here + # we need to loop over all groups and fail on mutually + # exclusive ones. + if all((posix, nonposix)) or\ + all((posix, external)) or\ + all((nonposix, external)): + ansible_module.fail_json( + msg="parameters are mutually exclusive for group " + "`{0}`: posix|nonposix|external".format(name)) + # Duplicating the condition for multiple group creation + if external is False: + ansible_module.fail_json( + msg="group can not be non-external") + # If nonposix is used, set posix as not nonposix + if nonposix is not None: + posix = not nonposix + user = group_name.get("user") + group = group_name.get("group") + service = group_name.get("service") + membermanager_user = group_name.get("membermanager_user") + membermanager_group = group_name.get("membermanager_group") + externalmember = group_name.get("externalmember") + nomembers = group_name.get("nomembers") + rename = group_name.get("rename") + + check_parameters(ansible_module, state, action) + + elif ( + isinstance( + group_name, (str, unicode) # pylint: disable=W0012,E0606 + ) + ): + name = group_name + else: + ansible_module.fail_json(msg="Group '%s' is not valid" % + repr(group_name)) + + # Make sure group exists + res_find = find_group(ansible_module, name) + res_find_orig = res_find + + # external members must de handled as SID + externalmember = convert_to_sid(externalmember) + + # idoverrides need to be compared through SID + idoverrideuser_sid = convert_to_sid(idoverrideuser) + res_idoverrideuser_sid = convert_to_sid( + (res_find or {}).get("member_idoverrideuser", [])) + idoverride_set = dict( + list(zip(idoverrideuser_sid or [], idoverrideuser or [])) + + list( + zip( + res_idoverrideuser_sid or [], + (res_find or {}).get("member_idoverrideuser", []) + ) + ) + ) + + user_add, user_del = [], [] + group_add, group_del = [], [] + service_add, service_del = [], [] + externalmember_add, externalmember_del = [], [] + idoverrides_add, idoverrides_del = [], [] + membermanager_user_add, membermanager_user_del 
= [], [] + membermanager_group_add, membermanager_group_del = [], [] + + # Create command + if state == "present": + # Can't change an existing posix group + check_objectclass_args(ansible_module, res_find, posix, + external) + + # Generate args + args = gen_args(description, gid, nomembers) + attr_before, attr_after = {}, {} + + if action == "group": + # Found the group + if res_find is not None: + # For all settings in args, check if there are + # different settings in the find result. + # If yes: modify + # Also if it is a modification from nonposix to posix + # or nonposix to external. + if not compare_args_ipa( + ansible_module, args, res_find + ) or ( + not is_posix_group(res_find) and + not is_external_group(res_find) and + (posix or external) + ): + if posix: + args['posix'] = True + if external: + args['external'] = True + commands.append([name, "group_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find) + else: + if posix is not None and not posix: + args['nonposix'] = True + if external: + args['external'] = True + commands.append([name, "group_add", args]) + attr_before, attr_after = {}, args + # Set res_find dict for next step + res_find = {} + + # if we just created/modified the group, update res_find + classes = list(res_find.setdefault("objectclass", [])) + if external and not is_external_group(res_find): + classes.append("ipaexternalgroup") + if posix and not is_posix_group(res_find): + classes.append("posixgroup") + res_find["objectclass"] = classes + + member_args = gen_member_args( + user, group, service, externalmember, idoverrideuser + ) + if not compare_args_ipa(ansible_module, member_args, + res_find): + # Generate addition and removal lists + user_add, user_del = gen_add_del_lists( + user, res_find.get("member_user")) + + group_add, group_del = gen_add_del_lists( + group, res_find.get("member_group")) + + service_add, service_del = gen_add_del_lists( + service, res_find.get("member_service")) + + (externalmember_add, + 
externalmember_del) = gen_add_del_lists( + externalmember, ( + list(res_find.get("member_external", [])) + + list(res_find.get("ipaexternalmember", [])) + ) + ) + + # There are multiple ways to name an AD User, and any + # can be used in idoverrides, so we create the add/del + # lists based on SID, and then use the given user name + # to the idoverride. + (idoverrides_add, + idoverrides_del) = gen_add_del_lists( + idoverrideuser_sid, res_idoverrideuser_sid) + idoverrides_add = [ + idoverride_set[sid] for sid in set(idoverrides_add) + ] + idoverrides_del = [ + idoverride_set[sid] for sid in set(idoverrides_del) + ] + + membermanager_user_add, membermanager_user_del = \ + gen_add_del_lists( + membermanager_user, + res_find.get("membermanager_user") + ) + + membermanager_group_add, membermanager_group_del = \ + gen_add_del_lists( + membermanager_group, + res_find.get("membermanager_group") + ) + + elif action == "member": + if res_find is None: + ansible_module.fail_json(msg="No group '%s'" % name) + + # Reduce add lists for member_user, member_group, + # member_service and member_external to new entries + # only that are not in res_find. 
+ user_add = gen_add_list( + user, res_find.get("member_user")) + group_add = gen_add_list( + group, res_find.get("member_group")) + service_add = gen_add_list( + service, res_find.get("member_service")) + externalmember_add = gen_add_list( + externalmember, ( + list(res_find.get("member_external", [])) + + list(res_find.get("ipaexternalmember", [])) + ) + ) + idoverrides_add = gen_add_list( + idoverrideuser_sid, res_idoverrideuser_sid) + idoverrides_add = [ + idoverride_set[sid] for sid in set(idoverrides_add) + ] + + membermanager_user_add = gen_add_list( + membermanager_user, + res_find.get("membermanager_user") + ) + membermanager_group_add = gen_add_list( + membermanager_group, + res_find.get("membermanager_group") + ) + + elif state == "absent": + if action == "group": + if res_find is not None: + commands.append([name, "group_del", {}]) + + elif action == "member": + if res_find is None: + ansible_module.fail_json(msg="No group '%s'" % name) + + if not is_external_group(res_find) and externalmember: + ansible_module.fail_json( + msg="Cannot add external members to a " + "non-external group." 
+ ) + + user_del = gen_intersection_list( + user, res_find.get("member_user")) + group_del = gen_intersection_list( + group, res_find.get("member_group")) + service_del = gen_intersection_list( + service, res_find.get("member_service")) + externalmember_del = gen_intersection_list( + externalmember, ( + list(res_find.get("member_external", [])) + + list(res_find.get("ipaexternalmember", [])) + ) + ) + idoverrides_del = gen_intersection_list( + idoverrideuser_sid, res_idoverrideuser_sid) + idoverrides_del = [ + idoverride_set[sid] for sid in set(idoverrides_del) + ] + + membermanager_user_del = gen_intersection_list( + membermanager_user, res_find.get("membermanager_user")) + membermanager_group_del = gen_intersection_list( + membermanager_group, + res_find.get("membermanager_group") + ) + elif state == "renamed": + if res_find is None: + ansible_module.fail_json(msg="No group '%s'" % name) + elif rename != name: + commands.append([name, 'group_mod', {"rename": rename}]) + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + # manage members + # setup member args for add/remove members. + add_member_args = { + "user": user_add, + "group": group_add, + } + + del_member_args = { + "user": user_del, + "group": group_del, + } + + if has_idoverrideuser: + add_member_args["idoverrideuser"] = idoverrides_add + del_member_args["idoverrideuser"] = idoverrides_del + + if has_add_member_service: + add_member_args["service"] = service_add + del_member_args["service"] = service_del + + if is_external_group(res_find): + if len(externalmember_add) > 0: + add_member_args["ipaexternalmember"] = \ + externalmember_add + if len(externalmember_del) > 0: + del_member_args["ipaexternalmember"] = \ + externalmember_del + elif externalmember: + ansible_module.fail_json( + msg="Cannot add external members to a " + "non-external group." 
+ ) + + # Add members + add_members = any([user_add, group_add, idoverrides_add, + service_add, externalmember_add]) + if add_members: + commands.append( + [name, "group_add_member", add_member_args] + ) + # Remove members + remove_members = any([user_del, group_del, idoverrides_del, + service_del, externalmember_del]) + if remove_members: + commands.append( + [name, "group_remove_member", del_member_args] + ) + + # manage membermanager members + if has_add_membermanager: + # Add membermanager users and groups + if any([membermanager_user_add, membermanager_group_add]): + commands.append( + [name, "group_add_member_manager", + { + "user": membermanager_user_add, + "group": membermanager_group_add, + }] + ) + # Remove member manager + if any([membermanager_user_del, membermanager_group_del]): + commands.append( + [name, "group_remove_member_manager", + { + "user": membermanager_user_del, + "group": membermanager_group_del, + }] + ) + + # Diff tracking + _orig = res_find_orig or {} + if state == "present": + if action == "group": + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "user", user_add, user_del, + _orig.get("member_user")), + gen_member_diff( + "group", group_add, group_del, + _orig.get("member_group")), + gen_member_diff( + "service", service_add, service_del, + _orig.get("member_service")), + gen_member_diff( + "external", externalmember_add, + externalmember_del, + list(_orig.get("member_external", [])) + + list(_orig.get("ipaexternalmember", []))), + gen_member_diff( + "idoverrideuser", idoverrides_add, + idoverrides_del, + _orig.get("member_idoverrideuser")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + _orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif action == "member": + before, after = 
merge_diffs( + gen_member_diff( + "user", user_add, user_del, + _orig.get("member_user")), + gen_member_diff( + "group", group_add, group_del, + _orig.get("member_group")), + gen_member_diff( + "service", service_add, service_del, + _orig.get("member_service")), + gen_member_diff( + "external", externalmember_add, + externalmember_del, + list(_orig.get("member_external", [])) + + list(_orig.get("ipaexternalmember", []))), + gen_member_diff( + "idoverrideuser", idoverrides_add, + idoverrides_del, + _orig.get("member_idoverrideuser")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + _orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif state == "renamed": + if rename != name and res_find_orig is not None: + diff_tracker.add_entry_diff( + name, {"cn": name}, {"cn": rename}) + elif state == "absent": + if action == "group": + if res_find_orig is not None: + diff_tracker.add_entry_diff( + name, + {"state": "present"}, {"state": "absent"}) + elif action == "member": + before, after = merge_diffs( + gen_member_diff( + "user", user_add, user_del, + _orig.get("member_user")), + gen_member_diff( + "group", group_add, group_del, + _orig.get("member_group")), + gen_member_diff( + "service", service_add, service_del, + _orig.get("member_service")), + gen_member_diff( + "external", externalmember_add, + externalmember_del, + list(_orig.get("member_external", [])) + + list(_orig.get("ipaexternalmember", []))), + gen_member_diff( + "idoverrideuser", idoverrides_add, + idoverrides_del, + _orig.get("member_idoverrideuser")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + 
_orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + + # Execute commands + changed = ansible_module.execute_ipa_commands( + commands, batch=True, keeponly=[], fail_on_member_errors=True) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipahbacrule.py b/plugins/modules/ipahbacrule.py new file mode 100644 index 000000000..db93fef5f --- /dev/null +++ b/plugins/modules/ipahbacrule.py @@ -0,0 +1,660 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. 
+ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + +DOCUMENTATION = """ +--- +module: ipahbacrule +short_description: Manage FreeIPA HBAC rules +description: Manage FreeIPA HBAC rules +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The hbacrule name + type: list + elements: str + required: true + aliases: ["cn"] + description: + description: The hbacrule description + type: str + required: false + usercategory: + description: User category the rule applies to + type: str + required: false + aliases: ["usercat"] + choices: ["all", ""] + hostcategory: + description: Host category the rule applies to + type: str + required: false + aliases: ["hostcat"] + choices: ["all", ""] + servicecategory: + description: Service category the rule applies to + type: str + required: false + aliases: ["servicecat"] + choices: ["all", ""] + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + host: + description: List of host names assigned to this hbacrule. + required: false + type: list + elements: str + hostgroup: + description: List of host groups assigned to this hbacrule. + required: false + type: list + elements: str + hbacsvc: + description: List of HBAC service names assigned to this hbacrule. + required: false + type: list + elements: str + hbacsvcgroup: + description: List of HBAC service names assigned to this hbacrule. + required: false + type: list + elements: str + user: + description: List of user names assigned to this hbacrule. + required: false + type: list + elements: str + group: + description: List of user groups assigned to this hbacrule. 
+ required: false + type: list + elements: str + action: + description: Work on hbacrule or member level + type: str + default: hbacrule + choices: ["member", "hbacrule"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent", "enabled", "disabled"] +author: + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Ensure HBAC Rule allhosts is present +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: allhosts + usercategory: all + +# Ensure host server is present in HBAC Rule allhosts +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: allhosts + host: server + action: member + +# Ensure HBAC Rule sshd-pinky is present +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + hostcategory: all + +# Ensure user pinky is present in HBAC Rule sshd-pinky +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + user: pinky + action: member + +# Ensure HBAC service sshd is present in HBAC Rule sshd-pinky +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + hbacsvc: sshd + action: member + +# Ensure HBAC Rule sshd-pinky is disabled +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + state: disabled + +# Ensure HBAC Rule sshd-pinky is enabled +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + state: enabled + +# Ensure HBAC Rule sshd-pinky is absent +- ipahbacrule: + ipaadmin_password: SomeADMINpassword + name: sshd-pinky + state: absent +""" + +RETURN = """ +""" + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, gen_add_list, \ + gen_intersection_list, ensure_fqdn +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_hbacrule(module, name): + _args = { + "all": True, + "cn": name, + 
} + + _result = module.ipa_command("hbacrule_find", name, _args) + + if len(_result["result"]) > 1: + module.fail_json( + msg="There is more than one hbacrule '%s'" % (name)) + elif len(_result["result"]) == 1: + res = _result["result"][0] + # hbacsvcgroup names are converted to lower case while creation with + # hbacsvcgroup_add, but builtin names may have mixed case as "Sudo", + # breaking the lower case comparison. Therefore all + # memberservice_hbacsvcgroup items are converted to lower case. + # (See: https://pagure.io/freeipa/issue/9464). + _member = "memberservice_hbacsvcgroup" + if _member in res: + res[_member] = [item.lower() for item in res[_member]] + return res + + return None + + +def gen_args(description, usercategory, hostcategory, servicecategory, + nomembers): + _args = {} + if description is not None: + _args["description"] = description + if usercategory is not None: + _args["usercategory"] = usercategory + if hostcategory is not None: + _args["hostcategory"] = hostcategory + if servicecategory is not None: + _args["servicecategory"] = servicecategory + if nomembers is not None: + _args["nomembers"] = nomembers + + return _args + + +def main(): + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["cn"], + required=True), + # present + description=dict(type="str", default=None), + usercategory=dict(type="str", default=None, + aliases=["usercat"], choices=["all", ""]), + hostcategory=dict(type="str", default=None, + aliases=["hostcat"], choices=["all", ""]), + servicecategory=dict(type="str", default=None, + aliases=["servicecat"], choices=["all", ""]), + nomembers=dict(required=False, type='bool', default=None), + host=dict(required=False, type='list', elements="str", + default=None), + hostgroup=dict(required=False, type='list', elements="str", + default=None), + hbacsvc=dict(required=False, type='list', elements="str", + default=None), + hbacsvcgroup=dict(required=False, 
type='list', elements="str", + default=None), + user=dict(required=False, type='list', elements="str", + default=None), + group=dict(required=False, type='list', elements="str", + default=None), + action=dict(type="str", default="hbacrule", + choices=["member", "hbacrule"]), + # state + state=dict(type="str", default="present", + choices=["present", "absent", + "enabled", "disabled"]), + ), + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + + # present + description = ansible_module.params_get("description") + usercategory = ansible_module.params_get("usercategory") + hostcategory = ansible_module.params_get("hostcategory") + servicecategory = ansible_module.params_get("servicecategory") + nomembers = ansible_module.params_get("nomembers") + host = ansible_module.params_get_lowercase("host") + hostgroup = ansible_module.params_get_lowercase("hostgroup") + hbacsvc = ansible_module.params_get_lowercase("hbacsvc") + hbacsvcgroup = ansible_module.params_get_lowercase("hbacsvcgroup") + user = ansible_module.params_get_lowercase("user") + group = ansible_module.params_get_lowercase("group") + action = ansible_module.params_get("action") + # state + state = ansible_module.params_get("state") + + # Check parameters + + invalid = [] + + if state == "present": + if len(names) != 1: + ansible_module.fail_json( + msg="Only one hbacrule can be added at a time.") + if action == "member": + invalid = ["description", "usercategory", "hostcategory", + "servicecategory", "nomembers"] + else: + if hostcategory == 'all' and any([host, hostgroup]): + ansible_module.fail_json( + msg="Hosts cannot be added when host category='all'") + if usercategory == 'all' and any([user, group]): + ansible_module.fail_json( + msg="Users cannot be added when user category='all'") + if servicecategory == 'all' and any([hbacsvc, hbacsvcgroup]): + ansible_module.fail_json( + msg="Services cannot be added 
when service category='all'") + + elif state == "absent": + if len(names) < 1: + ansible_module.fail_json(msg="No name given.") + invalid = ["description", "usercategory", "hostcategory", + "servicecategory", "nomembers"] + if action == "hbacrule": + invalid.extend(["host", "hostgroup", "hbacsvc", "hbacsvcgroup", + "user", "group"]) + + elif state in ["enabled", "disabled"]: + if len(names) < 1: + ansible_module.fail_json(msg="No name given.") + if action == "member": + ansible_module.fail_json( + msg="Action member can not be used with states enabled and " + "disabled") + invalid = ["description", "usercategory", "hostcategory", + "servicecategory", "nomembers", "host", "hostgroup", + "hbacsvc", "hbacsvcgroup", "user", "group"] + else: + ansible_module.fail_json(msg="Invalid state '%s'" % state) + + ansible_module.params_fail_used_invalid(invalid, state, action) + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Connect to IPA API + with ansible_module.ipa_connect(): + + # Get default domain + default_domain = ansible_module.ipa_get_domain() + + # Ensure fqdn host names, use default domain for simple names + if host is not None: + _host = [ensure_fqdn(x, default_domain).lower() for x in host] + host = _host + + commands = [] + + for name in names: + # Make sure hbacrule exists + res_find = find_hbacrule(ansible_module, name) + res_find_orig = res_find + + host_add, host_del = [], [] + hostgroup_add, hostgroup_del = [], [] + hbacsvc_add, hbacsvc_del = [], [] + hbacsvcgroup_add, hbacsvcgroup_del = [], [] + user_add, user_del = [], [] + group_add, group_del = [], [] + + # Create command + if state == "present": + # Generate args + args = gen_args(description, usercategory, hostcategory, + servicecategory, nomembers) + attr_before, attr_after = {}, {} + + if action == "hbacrule": + # Found the hbacrule + if res_find is not None: + # Remove usercategory, hostcategory and + # servicecategory from args if "" and category + # not in 
res_find (needed for idempotency) + if "usercategory" in args and \ + args["usercategory"] == "" and \ + "usercategory" not in res_find: + del args["usercategory"] + if "hostcategory" in args and \ + args["hostcategory"] == "" and \ + "hostcategory" not in res_find: + del args["hostcategory"] + if "servicecategory" in args and \ + args["servicecategory"] == "" and \ + "servicecategory" not in res_find: + del args["servicecategory"] + + # For all settings is args, check if there are + # different settings in the find result. + # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([name, "hbacrule_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find) + else: + commands.append([name, "hbacrule_add", args]) + attr_before, attr_after = {}, args + # Set res_find to empty dict for next step + res_find = {} + + # Generate addition and removal lists + if host is not None: + host_add, host_del = gen_add_del_lists( + host, res_find.get("memberhost_host")) + + if hostgroup is not None: + hostgroup_add, hostgroup_del = gen_add_del_lists( + hostgroup, res_find.get("memberhost_hostgroup")) + + if hbacsvc is not None: + hbacsvc_add, hbacsvc_del = gen_add_del_lists( + hbacsvc, res_find.get("memberservice_hbacsvc"), + ) + + if hbacsvcgroup is not None: + hbacsvcgroup_add, hbacsvcgroup_del = gen_add_del_lists( + hbacsvcgroup, + res_find.get("memberservice_hbacsvcgroup")) + + if user is not None: + user_add, user_del = gen_add_del_lists( + user, res_find.get("memberuser_user")) + + if group is not None: + group_add, group_del = gen_add_del_lists( + group, res_find.get("memberuser_group")) + + elif action == "member": + if res_find is None: + ansible_module.fail_json(msg="No hbacrule '%s'" % name) + + # Generate add lists for host, hostgroup and + # res_find to only try to add hosts and hostgroups + # that not in hbacrule already + if host: + host_add = gen_add_list( + host, res_find.get("memberhost_host")) + if hostgroup: + 
hostgroup_add = gen_add_list( + hostgroup, res_find.get("memberhost_hostgroup")) + + # Generate add lists for hbacsvc, hbacsvcgroup and + # res_find to only try to add hbacsvcs and hbacsvcgroups + # that not in hbacrule already + if hbacsvc: + hbacsvc_add = gen_add_list( + hbacsvc, res_find.get("memberservice_hbacsvc")) + if hbacsvcgroup: + hbacsvcgroup_add = gen_add_list( + hbacsvcgroup, + res_find.get("memberservice_hbacsvcgroup")) + + # Generate add lists for user, group and + # res_find to only try to add users and groups + # that not in hbacrule already + if user: + user_add = gen_add_list( + user, res_find.get("memberuser_user")) + if group: + group_add = gen_add_list( + group, res_find.get("memberuser_group")) + + elif state == "absent": + if action == "hbacrule": + if res_find is not None: + commands.append([name, "hbacrule_del", {}]) + + elif action == "member": + if res_find is None: + ansible_module.fail_json(msg="No hbacrule '%s'" % name) + + # Generate intersection lists for host, hostgroup and + # res_find to only try to remove hosts and hostgroups + # that are in hbacrule + if host: + if "memberhost_host" in res_find: + host_del = gen_intersection_list( + host, res_find["memberhost_host"]) + if hostgroup: + if "memberhost_hostgroup" in res_find: + hostgroup_del = gen_intersection_list( + hostgroup, res_find["memberhost_hostgroup"]) + + # Generate intersection lists for hbacsvc, hbacsvcgroup + # and res_find to only try to remove hbacsvcs and + # hbacsvcgroups that are in hbacrule + if hbacsvc: + if "memberservice_hbacsvc" in res_find: + hbacsvc_del = gen_intersection_list( + hbacsvc, res_find["memberservice_hbacsvc"]) + if hbacsvcgroup: + if "memberservice_hbacsvcgroup" in res_find: + hbacsvcgroup_del = gen_intersection_list( + hbacsvcgroup, + res_find["memberservice_hbacsvcgroup"]) + + # Generate intersection lists for user, group and + # res_find to only try to remove users and groups + # that are in hbacrule + if user: + if "memberuser_user" in 
res_find: + user_del = gen_intersection_list( + user, res_find["memberuser_user"]) + if group: + if "memberuser_group" in res_find: + group_del = gen_intersection_list( + group, res_find["memberuser_group"]) + + elif state == "enabled": + if res_find is None: + ansible_module.fail_json(msg="No hbacrule '%s'" % name) + # hbacrule_enable is not failing on an enabled hbacrule + # Therefore it is needed to have a look at the ipaenabledflag + # in res_find. + # FreeIPA 4.9.10+ and 4.10 use proper mapping for + # boolean values, so we need to convert it to str + # for comparison. + # See: https://github.com/freeipa/freeipa/pull/6294 + enabled_flag = str(res_find.get("ipaenabledflag", [False])[0]) + if enabled_flag.upper() != "TRUE": + commands.append([name, "hbacrule_enable", {}]) + diff_tracker.add_entry_diff( + name, {"enabled": False}, {"enabled": True}) + + elif state == "disabled": + if res_find is None: + ansible_module.fail_json(msg="No hbacrule '%s'" % name) + # hbacrule_disable is not failing on an enabled hbacrule + # Therefore it is needed to have a look at the ipaenabledflag + # in res_find. + # FreeIPA 4.9.10+ and 4.10 use proper mapping for + # boolean values, so we need to convert it to str + # for comparison. + # See: https://github.com/freeipa/freeipa/pull/6294 + enabled_flag = str(res_find.get("ipaenabledflag", [False])[0]) + if enabled_flag.upper() != "FALSE": + commands.append([name, "hbacrule_disable", {}]) + diff_tracker.add_entry_diff( + name, {"enabled": True}, {"enabled": False}) + + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + # Manage HBAC rule members. 
+ + # Add hosts and hostgroups + if len(host_add) > 0 or len(hostgroup_add) > 0: + commands.append([name, "hbacrule_add_host", + { + "host": host_add, + "hostgroup": hostgroup_add, + }]) + # Remove hosts and hostgroups + if len(host_del) > 0 or len(hostgroup_del) > 0: + commands.append([name, "hbacrule_remove_host", + { + "host": host_del, + "hostgroup": hostgroup_del, + }]) + + # Add hbacsvcs and hbacsvcgroups + if len(hbacsvc_add) > 0 or len(hbacsvcgroup_add) > 0: + commands.append([name, "hbacrule_add_service", + { + "hbacsvc": hbacsvc_add, + "hbacsvcgroup": hbacsvcgroup_add, + }]) + # Remove hbacsvcs and hbacsvcgroups + if len(hbacsvc_del) > 0 or len(hbacsvcgroup_del) > 0: + commands.append([name, "hbacrule_remove_service", + { + "hbacsvc": hbacsvc_del, + "hbacsvcgroup": hbacsvcgroup_del, + }]) + + # Add users and groups + if len(user_add) > 0 or len(group_add) > 0: + commands.append([name, "hbacrule_add_user", + { + "user": user_add, + "group": group_add, + }]) + # Remove users and groups + if len(user_del) > 0 or len(group_del) > 0: + commands.append([name, "hbacrule_remove_user", + { + "user": user_del, + "group": group_del, + }]) + + # Diff tracking + _orig = res_find_orig or {} + if state == "present": + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "host", host_add, host_del, + _orig.get("memberhost_host")), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("memberhost_hostgroup")), + gen_member_diff( + "hbacsvc", hbacsvc_add, hbacsvc_del, + _orig.get("memberservice_hbacsvc")), + gen_member_diff( + "hbacsvcgroup", hbacsvcgroup_add, hbacsvcgroup_del, + _orig.get("memberservice_hbacsvcgroup")), + gen_member_diff( + "user", user_add, user_del, + _orig.get("memberuser_user")), + gen_member_diff( + "group", group_add, group_del, + _orig.get("memberuser_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif state == "absent": + if action == "hbacrule": + if res_find_orig is not None: + 
diff_tracker.add_entry_diff( + name, + {"state": "present"}, {"state": "absent"}) + elif action == "member": + before, after = merge_diffs( + gen_member_diff( + "host", host_add, host_del, + _orig.get("memberhost_host")), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("memberhost_hostgroup")), + gen_member_diff( + "hbacsvc", hbacsvc_add, hbacsvc_del, + _orig.get("memberservice_hbacsvc")), + gen_member_diff( + "hbacsvcgroup", hbacsvcgroup_add, + hbacsvcgroup_del, + _orig.get("memberservice_hbacsvcgroup")), + gen_member_diff( + "user", user_add, user_del, + _orig.get("memberuser_user")), + gen_member_diff( + "group", group_add, group_del, + _orig.get("memberuser_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + + # Execute commands + + changed = ansible_module.execute_ipa_commands( + commands, fail_on_member_errors=True) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipahostgroup.py b/plugins/modules/ipahostgroup.py new file mode 100644 index 000000000..29c526874 --- /dev/null +++ b/plugins/modules/ipahostgroup.py @@ -0,0 +1,568 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + + +DOCUMENTATION = """ +--- +module: ipahostgroup +short_description: Manage FreeIPA hostgroups +description: Manage FreeIPA hostgroups +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The hostgroup name + type: list + elements: str + required: true + aliases: ["cn"] + description: + description: The hostgroup description + type: str + required: false + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + host: + description: List of host names assigned to this hostgroup. + required: false + type: list + elements: str + hostgroup: + description: List of hostgroup names assigned to this hostgroup. + required: false + type: list + elements: str + membermanager_user: + description: + - List of member manager users assigned to this hostgroup. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + membermanager_group: + description: + - List of member manager groups assigned to this hostgroup. + - Only usable with IPA versions 4.8.4 and up. + required: false + type: list + elements: str + rename: + description: + - Rename hostgroup to the given name. + - Only usable with IPA versions 4.8.7 and up. 
+ type: str + required: false + aliases: ["new_name"] + action: + description: Work on hostgroup or member level + type: str + default: hostgroup + choices: ["member", "hostgroup"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent", "renamed"] +author: + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Ensure host-group databases is present +- ipahostgroup: + ipaadmin_password: SomeADMINpassword + name: databases + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + +# Ensure hosts and hostgroups are present in existing databases hostgroup +- ipahostgroup: + ipaadmin_password: SomeADMINpassword + name: databases + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + action: member + +# Ensure hosts and hostgroups are absent in databases hostgroup +- ipahostgroup: + ipaadmin_password: SomeADMINpassword + name: databases + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + action: member + state: absent + +# Rename hostgroup +- ipahostgroup: + ipaadmin_password: SomeADMINpassword + name: databases + rename: datalake + +# Ensure host-group databases is absent +- ipahostgroup: + ipaadmin_password: SomeADMINpassword + name: databases + state: absent +""" + +RETURN = """ +""" + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, gen_add_list, \ + gen_intersection_list, ensure_fqdn +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_hostgroup(module, name): + _args = { + "all": True, + "cn": name, + } + + _result = module.ipa_command("hostgroup_find", name, _args) + + if len(_result["result"]) > 1: + module.fail_json( + msg="There is more than one hostgroup '%s'" % (name)) + elif len(_result["result"]) == 1: + return 
_result["result"][0] + + return None + + +def gen_args(description, nomembers, rename): + _args = {} + if description is not None: + _args["description"] = description + if nomembers is not None: + _args["nomembers"] = nomembers + if rename is not None: + _args["rename"] = rename + + return _args + + +def main(): + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["cn"], + required=True), + # present + description=dict(type="str", default=None), + nomembers=dict(required=False, type='bool', default=None), + host=dict(required=False, type='list', elements="str", + default=None), + hostgroup=dict(required=False, type='list', elements="str", + default=None), + membermanager_user=dict(required=False, type='list', + elements="str", default=None), + membermanager_group=dict(required=False, type='list', + elements="str", default=None), + rename=dict(required=False, type='str', default=None, + aliases=["new_name"]), + action=dict(type="str", default="hostgroup", + choices=["member", "hostgroup"]), + # state + state=dict(type="str", default="present", + choices=["present", "absent", "renamed"]), + ), + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get_lowercase("name") + + # present + description = ansible_module.params_get("description") + nomembers = ansible_module.params_get("nomembers") + host = ansible_module.params_get("host") + hostgroup = ansible_module.params_get_lowercase("hostgroup") + membermanager_user = ansible_module.params_get_lowercase( + "membermanager_user" + ) + membermanager_group = ansible_module.params_get_lowercase( + "membermanager_group" + ) + rename = ansible_module.params_get_lowercase("rename") + action = ansible_module.params_get("action") + # state + state = ansible_module.params_get("state") + + # Check parameters + + invalid = [] + if state == "present": + if len(names) != 1: + 
ansible_module.fail_json( + msg="Only one hostgroup can be added at a time.") + invalid = ["rename"] + if action == "member": + invalid.extend(["description", "nomembers"]) + + if state == "renamed": + if len(names) != 1: + ansible_module.fail_json( + msg="Only one hostgroup can be added at a time.") + if action == "member": + ansible_module.fail_json( + msg="Action '%s' can not be used with state '%s'" % + (action, state)) + invalid = [ + "description", "nomembers", "host", "hostgroup", + "membermanager_user", "membermanager_group" + ] + + if state == "absent": + if len(names) < 1: + ansible_module.fail_json( + msg="No name given.") + invalid = ["description", "nomembers", "rename"] + if action == "hostgroup": + invalid.extend(["host", "hostgroup"]) + + ansible_module.params_fail_used_invalid(invalid, state, action) + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Connect to IPA API + with ansible_module.ipa_connect(): + + has_add_membermanager = ansible_module.ipa_command_exists( + "hostgroup_add_member_manager") + if ((membermanager_user is not None or + membermanager_group is not None) and not has_add_membermanager): + ansible_module.fail_json( + msg="Managing a membermanager user or group is not supported " + "by your IPA version" + ) + has_mod_rename = ansible_module.ipa_command_param_exists( + "hostgroup_mod", "rename") + if not has_mod_rename and rename is not None: + ansible_module.fail_json( + msg="Renaming hostgroups is not supported by your IPA version") + + # If hosts are given, ensure that the hosts are FQDN and also + # lowercase to be able to do a proper comparison to exising hosts + # in the hostgroup. 
+ # Fixes #666 (ipahostgroup not idempotent and with error) + if host is not None: + default_domain = ansible_module.ipa_get_domain() + host = [ensure_fqdn(_host, default_domain).lower() + for _host in host] + + commands = [] + + for name in names: + # clean add/del lists + host_add, host_del = [], [] + hostgroup_add, hostgroup_del = [], [] + membermanager_user_add, membermanager_user_del = [], [] + membermanager_group_add, membermanager_group_del = [], [] + + # Make sure hostgroup exists + res_find = find_hostgroup(ansible_module, name) + res_find_orig = res_find + + # Create command + if state == "present": + # Generate args + args = gen_args(description, nomembers, rename) + attr_before, attr_after = {}, {} + + if action == "hostgroup": + # Found the hostgroup + if res_find is not None: + # For all settings is args, check if there are + # different settings in the find result. + # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([name, "hostgroup_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find) + else: + commands.append([name, "hostgroup_add", args]) + attr_before, attr_after = {}, args + # Set res_find to empty dict for next step + res_find = {} + + # Generate addition and removal lists + host_add, host_del = gen_add_del_lists( + host, res_find.get("member_host") + ) + + hostgroup_add, hostgroup_del = gen_add_del_lists( + hostgroup, res_find.get("member_hostgroup") + ) + + if has_add_membermanager: + membermanager_user_add, membermanager_user_del = \ + gen_add_del_lists( + membermanager_user, + res_find.get("membermanager_user") + ) + + membermanager_group_add, membermanager_group_del = \ + gen_add_del_lists( + membermanager_group, + res_find.get("membermanager_group") + ) + + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No hostgroup '%s'" % name) + + # Reduce add lists for member_host and member_hostgroup, + # to new entries only that are not in 
res_find. + host_add = gen_add_list( + host, res_find.get("member_host") + ) + hostgroup_add = gen_add_list( + hostgroup, res_find.get("member_hostgroup") + ) + + if has_add_membermanager: + # Reduce add list for membermanager_user and + # membermanager_group to new entries only that are + # not in res_find. + membermanager_user_add = gen_add_list( + membermanager_user, + res_find.get("membermanager_user") + ) + membermanager_group_add = gen_add_list( + membermanager_group, + res_find.get("membermanager_group") + ) + + elif state == "renamed": + if res_find is not None: + if rename != name: + commands.append( + [name, "hostgroup_mod", {"rename": rename}] + ) + else: + # If a hostgroup with the desired name exists, do nothing. + new_find = find_hostgroup(ansible_module, rename) + if new_find is None: + # Fail only if the either hostsgroups do not exist. + ansible_module.fail_json( + msg="Attribute `rename` can not be used, unless " + "hostgroup exists." + ) + + elif state == "absent": + if action == "hostgroup": + if res_find is not None: + commands.append([name, "hostgroup_del", {}]) + + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No hostgroup '%s'" % name) + + # Reduce del lists of member_host and member_hostgroup, + # to the entries only that are in res_find. + if host is not None: + host_del = gen_intersection_list( + host, res_find.get("member_host") + ) + if hostgroup is not None: + hostgroup_del = gen_intersection_list( + hostgroup, res_find.get("member_hostgroup") + ) + + if has_add_membermanager: + # Get lists of membermanager users that exist + # in IPA and should be removed. 
+ membermanager_user_del = gen_intersection_list( + membermanager_user, + res_find.get("membermanager_user") + ) + membermanager_group_del = gen_intersection_list( + membermanager_group, + res_find.get("membermanager_group") + ) + + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + # Manage members + + # Add members + if host_add or hostgroup_add: + commands.append([ + name, "hostgroup_add_member", + { + "host": host_add, + "hostgroup": hostgroup_add, + } + ]) + + # Remove members + if host_del or hostgroup_del: + commands.append([ + name, "hostgroup_remove_member", + { + "host": host_del, + "hostgroup": hostgroup_del, + } + ]) + + # Manage membermanager users and groups + if has_add_membermanager: + # Add membermanager users and groups + if membermanager_user_add or membermanager_group_add: + commands.append([ + name, "hostgroup_add_member_manager", + { + "user": membermanager_user_add, + "group": membermanager_group_add, + } + ]) + # Remove membermanager users and groups + if membermanager_user_del or membermanager_group_del: + commands.append([ + name, "hostgroup_remove_member_manager", + { + "user": membermanager_user_del, + "group": membermanager_group_del, + } + ]) + + # Diff tracking + _orig = res_find_orig or {} + if state == "present": + if action == "hostgroup": + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "host", host_add, host_del, + _orig.get("member_host")), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("member_hostgroup")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + _orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif action == "member": + before, after = merge_diffs( + gen_member_diff( + "host", host_add, host_del, + 
_orig.get("member_host")), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("member_hostgroup")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + _orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif state == "renamed": + if rename != name and res_find_orig is not None: + diff_tracker.add_entry_diff( + name, {"cn": name}, {"cn": rename}) + elif state == "absent": + if action == "hostgroup": + if res_find_orig is not None: + diff_tracker.add_entry_diff( + name, + {"state": "present"}, {"state": "absent"}) + elif action == "member": + before, after = merge_diffs( + gen_member_diff( + "host", host_add, host_del, + _orig.get("member_host")), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("member_hostgroup")), + gen_member_diff( + "membermanager_user", + membermanager_user_add, membermanager_user_del, + _orig.get("membermanager_user")), + gen_member_diff( + "membermanager_group", + membermanager_group_add, membermanager_group_del, + _orig.get("membermanager_group")), + ) + diff_tracker.add_entry_diff(name, before, after) + + # Execute commands + + changed = ansible_module.execute_ipa_commands( + commands, fail_on_member_errors=True) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipapwpolicy.py b/plugins/modules/ipapwpolicy.py new file mode 100644 index 000000000..5fbad1340 --- /dev/null +++ b/plugins/modules/ipapwpolicy.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Thomas Woerner +# Rafael Guterres Jeffman +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free 
software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + +DOCUMENTATION = """ +--- +module: ipapwpolicy +short_description: Manage FreeIPA pwpolicies +description: Manage FreeIPA pwpolicies +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The group name + type: list + elements: str + required: false + aliases: ["cn"] + maxlife: + description: Maximum password lifetime (in days). (int or "") + type: str + required: false + aliases: ["krbmaxpwdlife"] + minlife: + description: Minimum password lifetime (in hours). (int or "") + type: str + required: false + aliases: ["krbminpwdlife"] + history: + description: Password history size. (int or "") + type: str + required: false + aliases: ["krbpwdhistorylength"] + minclasses: + description: Minimum number of character classes. (int or "") + type: str + required: false + aliases: ["krbpwdmindiffchars"] + minlength: + description: Minimum length of password. (int or "") + type: str + required: false + aliases: ["krbpwdminlength"] + priority: + description: > + Priority of the policy (higher number means lower priority). 
(int or "") + type: str + required: false + aliases: ["cospriority"] + maxfail: + description: Consecutive failures before lockout. (int or "") + type: str + required: false + aliases: ["krbpwdmaxfailure"] + failinterval: + description: > + Period after which failure count will be reset (seconds). (int or "") + type: str + required: false + aliases: ["krbpwdfailurecountinterval"] + lockouttime: + description: Period for which lockout is enforced (seconds). (int or "") + type: str + required: false + aliases: ["krbpwdlockoutduration"] + maxrepeat: + description: > + Maximum number of same consecutive characters. + Requires IPA 4.9+. (int or "") + type: str + required: false + aliases: ["ipapwdmaxrepeat"] + maxsequence: + description: > + The maximum length of monotonic character sequences (abcd). + Requires IPA 4.9+. (int or "") + type: str + required: false + aliases: ["ipapwdmaxsequence"] + dictcheck: + description: > + Check if the password is a dictionary word. + Requires IPA 4.9+. (bool or "") + type: str + required: false + aliases: ["ipapwdictcheck"] + usercheck: + description: > + Check if the password contains the username. + Requires IPA 4.9+. (bool or "") + type: str + required: false + aliases: ["ipapwdusercheck"] + gracelimit: + description: > + Number of LDAP authentications allowed after expiration. + Requires IPA 4.10.1+. 
(int or "") + type: str + required: false + aliases: ["passwordgracelimit"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent"] +author: + - Thomas Woerner (@t-woerner) + - Rafael Guterres Jeffman (@rjeffman) +""" + +EXAMPLES = """ +# Ensure pwpolicy is set for ops +- ipapwpolicy: + ipaadmin_password: SomeADMINpassword + name: ops + minlife: 7 + maxlife: 49 + history: 5 + priority: 1 + lockouttime: 300 + minlength: 8 +""" + +RETURN = """ +""" + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_pwpolicy(module, name): + _args = { + "all": True, + "cn": name, + } + + _result = module.ipa_command("pwpolicy_find", name, _args) + + if len(_result["result"]) > 1: + module.fail_json( + msg="There is more than one pwpolicy '%s'" % (name)) + elif len(_result["result"]) == 1: + return _result["result"][0] + + return None + + +def gen_args(module, + maxlife, minlife, history, minclasses, minlength, priority, + maxfail, failinterval, lockouttime, maxrepeat, maxsequence, + dictcheck, usercheck, gracelimit): + _args = {} + if maxlife is not None: + _args["krbmaxpwdlife"] = maxlife + if minlife is not None: + _args["krbminpwdlife"] = minlife + if history is not None: + _args["krbpwdhistorylength"] = history + if minclasses is not None: + _args["krbpwdmindiffchars"] = minclasses + if minlength is not None: + _args["krbpwdminlength"] = minlength + if priority is not None: + _args["cospriority"] = priority + if maxfail is not None: + _args["krbpwdmaxfailure"] = maxfail + if failinterval is not None: + _args["krbpwdfailurecountinterval"] = failinterval + if lockouttime is not None: + _args["krbpwdlockoutduration"] = lockouttime + if maxrepeat is not None: + _args["ipapwdmaxrepeat"] = 
maxrepeat + if maxsequence is not None: + _args["ipapwdmaxsequence"] = maxsequence + if dictcheck is not None: + if module.ipa_check_version("<", "4.9.10"): + # Allowed values: "TRUE", "FALSE", "" + _args["ipapwddictcheck"] = "TRUE" if dictcheck is True else \ + "FALSE" if dictcheck is False else dictcheck + else: + _args["ipapwddictcheck"] = dictcheck + if usercheck is not None: + if module.ipa_check_version("<", "4.9.10"): + # Allowed values: "TRUE", "FALSE", "" + _args["ipapwdusercheck"] = "TRUE" if usercheck is True else \ + "FALSE" if usercheck is False else usercheck + else: + _args["ipapwdusercheck"] = usercheck + if gracelimit is not None: + _args["passwordgracelimit"] = gracelimit + + return _args + + +def check_supported_params( + module, maxrepeat, maxsequence, dictcheck, usercheck, gracelimit +): + # All password checking parameters were added by the same commit, + # so we only need to test one of them. + has_password_check = module.ipa_command_param_exists( + "pwpolicy_add", "ipapwdmaxrepeat") + # check if gracelimit is supported + has_gracelimit = module.ipa_command_param_exists( + "pwpolicy_add", "passwordgracelimit") + + # If needed, report unsupported password checking paramteres + if ( + not has_password_check + and any([maxrepeat, maxsequence, dictcheck, usercheck]) + ): + module.fail_json( + msg="Your IPA version does not support arguments: " + "maxrepeat, maxsequence, dictcheck, usercheck.") + + if not has_gracelimit and gracelimit is not None: + module.fail_json( + msg="Your IPA version does not support 'gracelimit'.") + + +def main(): + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["cn"], + default=None, required=False), + # present + + maxlife=dict(type="str", aliases=["krbmaxpwdlife"], default=None), + minlife=dict(type="str", aliases=["krbminpwdlife"], default=None), + history=dict(type="str", aliases=["krbpwdhistorylength"], + default=None), + 
minclasses=dict(type="str", aliases=["krbpwdmindiffchars"], + default=None), + minlength=dict(type="str", aliases=["krbpwdminlength"], + default=None), + priority=dict(type="str", aliases=["cospriority"], default=None), + maxfail=dict(type="str", aliases=["krbpwdmaxfailure"], + default=None), + failinterval=dict(type="str", + aliases=["krbpwdfailurecountinterval"], + default=None), + lockouttime=dict(type="str", aliases=["krbpwdlockoutduration"], + default=None), + maxrepeat=dict(type="str", aliases=["ipapwdmaxrepeat"], + default=None), + maxsequence=dict(type="str", aliases=["ipapwdmaxsequence"], + default=None), + dictcheck=dict(type="str", aliases=["ipapwdictcheck"], + default=None), + usercheck=dict(type="str", aliases=["ipapwdusercheck"], + default=None), + gracelimit=dict(type="str", aliases=["passwordgracelimit"], + default=None), + # state + state=dict(type="str", default="present", + choices=["present", "absent"]), + ), + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + + # present + maxlife = ansible_module.params_get_with_type_cast( + "maxlife", int, allow_empty=True) + minlife = ansible_module.params_get_with_type_cast( + "minlife", int, allow_empty=True) + history = ansible_module.params_get_with_type_cast( + "history", int, allow_empty=True) + minclasses = ansible_module.params_get_with_type_cast( + "minclasses", int, allow_empty=True) + minlength = ansible_module.params_get_with_type_cast( + "minlength", int, allow_empty=True) + priority = ansible_module.params_get_with_type_cast( + "priority", int, allow_empty=True) + maxfail = ansible_module.params_get_with_type_cast( + "maxfail", int, allow_empty=True) + failinterval = ansible_module.params_get_with_type_cast( + "failinterval", int, allow_empty=True) + lockouttime = ansible_module.params_get_with_type_cast( + "lockouttime", int, allow_empty=True) + maxrepeat = 
ansible_module.params_get_with_type_cast( + "maxrepeat", int, allow_empty=True) + maxsequence = ansible_module.params_get_with_type_cast( + "maxsequence", int, allow_empty=True) + dictcheck = ansible_module.params_get_with_type_cast( + "dictcheck", bool, allow_empty=True) + usercheck = ansible_module.params_get_with_type_cast( + "usercheck", bool, allow_empty=True) + gracelimit = ansible_module.params_get_with_type_cast( + "gracelimit", int, allow_empty=True) + + # state + state = ansible_module.params_get("state") + + # Check parameters + invalid = [] + + if names is None: + names = [u"global_policy"] + + if state == "present": + if len(names) != 1: + ansible_module.fail_json( + msg="Only one pwpolicy can be set at a time.") + + if state == "absent": + if len(names) < 1: + ansible_module.fail_json(msg="No name given.") + if "global_policy" in names: + ansible_module.fail_json( + msg="'global_policy' can not be made absent.") + invalid = ["maxlife", "minlife", "history", "minclasses", + "minlength", "priority", "maxfail", "failinterval", + "lockouttime", "maxrepeat", "maxsequence", "dictcheck", + "usercheck", "gracelimit"] + + ansible_module.params_fail_used_invalid(invalid, state) + + # Ensure gracelimit has proper limit. 
+ if gracelimit: + if gracelimit < -1: + ansible_module.fail_json( + msg="'gracelimit' must be no less than -1") + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + with ansible_module.ipa_connect(): + + check_supported_params( + ansible_module, maxrepeat, maxsequence, dictcheck, usercheck, + gracelimit + ) + + commands = [] + + for name in names: + # Try to find pwpolicy + res_find = find_pwpolicy(ansible_module, name) + + # Create command + if state == "present": + # Generate args + args = gen_args(ansible_module, + maxlife, minlife, history, minclasses, + minlength, priority, maxfail, failinterval, + lockouttime, maxrepeat, maxsequence, dictcheck, + usercheck, gracelimit) + + # Found the pwpolicy + if res_find is not None: + # For all settings is args, check if there are + # different settings in the find result. + # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([name, "pwpolicy_mod", args]) + before, after = gen_args_diff(args, res_find) + diff_tracker.add_entry_diff(name, before, after) + else: + commands.append([name, "pwpolicy_add", args]) + diff_tracker.add_entry_diff(name, {}, args) + + elif state == "absent": + if res_find is not None: + commands.append([name, "pwpolicy_del", {}]) + diff_tracker.add_entry_diff( + name, {"state": "present"}, {"state": "absent"}) + + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + # Execute commands + + changed = ansible_module.execute_ipa_commands(commands) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipasudocmd.py b/plugins/modules/ipasudocmd.py new file mode 100644 index 000000000..bec7b2fe4 --- /dev/null +++ b/plugins/modules/ipasudocmd.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Rafael Guterres Jeffman +# Thomas Woerner +# +# Copyright (C) 
2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + + +DOCUMENTATION = """ +--- +module: ipasudocmd +short_description: Manage FreeIPA sudo command +description: Manage FreeIPA sudo command +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The sudo command + type: list + elements: str + required: true + aliases: ["sudocmd"] + description: + description: The command description + type: str + required: false + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent"] +author: + - Rafael Guterres Jeffman (@rjeffman) + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Ensure sudocmd is present +- ipasudocmd: + ipaadmin_password: SomeADMINpassword + name: /usr/bin/su + state: present + +# Ensure sudocmd is absent +- ipasudocmd: + ipaadmin_password: SomeADMINpassword + name: /usr/bin/su + state: absent +""" + +RETURN = """ +""" + +from 
ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_sudocmd(module, name): + _args = { + "all": True, + "sudocmd": name, + } + + _result = module.ipa_command("sudocmd_find", name, _args) + + if len(_result["result"]) > 1: + module.fail_json( + msg="There is more than one sudocmd '%s'" % (name)) + elif len(_result["result"]) == 1: + return _result["result"][0] + + return None + + +def gen_args(description): + _args = {} + if description is not None: + _args["description"] = description + + return _args + + +def main(): + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["sudocmd"], + required=True), + # present + description=dict(type="str", default=None), + # state + state=dict(type="str", default="present", + choices=["present", "absent"]), + ), + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + + # present + description = ansible_module.params_get("description") + # state + state = ansible_module.params_get("state") + + # Check parameters + invalid = [] + if state == "absent": + invalid = ["description"] + + ansible_module.params_fail_used_invalid(invalid, state) + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Connect to IPA API + with ansible_module.ipa_connect(): + + commands = [] + + for name in names: + # Make sure hostgroup exists + res_find = find_sudocmd(ansible_module, name) + + # Create command + if state == "present": + # Generate args + args = gen_args(description) + if res_find is not None: + # For all settings in args, check if there are + # different settings in the find result. 
+ # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([name, "sudocmd_mod", args]) + before, after = gen_args_diff(args, res_find) + diff_tracker.add_entry_diff(name, before, after) + else: + commands.append([name, "sudocmd_add", args]) + diff_tracker.add_entry_diff(name, {}, args) + # Set res_find to empty dict for next step + res_find = {} + elif state == "absent": + if res_find is not None: + commands.append([name, "sudocmd_del", {}]) + diff_tracker.add_entry_diff( + name, {"state": "present"}, {"state": "absent"}) + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + changed = ansible_module.execute_ipa_commands(commands) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipasudocmdgroup.py b/plugins/modules/ipasudocmdgroup.py new file mode 100644 index 000000000..1d71996f1 --- /dev/null +++ b/plugins/modules/ipasudocmdgroup.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Rafael Guterres Jeffman +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. 
Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + + +DOCUMENTATION = """ +--- +module: ipasudocmdgroup +short_description: Manage FreeIPA sudocmd groups +description: Manage FreeIPA sudocmd groups +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The sudocmodgroup name + type: list + elements: str + required: true + aliases: ["cn"] + description: + description: The sudocmdgroup description + type: str + required: false + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + sudocmd: + description: List of sudocmds assigned to this sudocmdgroup. + required: false + type: list + elements: str + action: + description: Work on sudocmdgroup or member level + type: str + default: sudocmdgroup + choices: ["member", "sudocmdgroup"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent"] +author: + - Rafael Guterres Jeffman (@rjeffman) + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Ensure sudocmd-group 'network' is present +- ipasudocmdgroup: + ipaadmin_password: SomeADMINpassword + name: network + state: present + +# Ensure sudocmdgroup and sudocmd are present in 'network' sudocmdgroup +- ipasudocmdgroup: + ipaadmin_password: SomeADMINpassword + name: network + sudocmd: + - /usr/sbin/ifconfig + - /usr/sbin/iwlist + action: member + +# Ensure sudocmdgroup and sudocmd are absent in 'network' sudocmdgroup +- ipasudocmdgroup: + ipaadmin_password: SomeADMINpassword + name: network + sudocmd: + - /usr/sbin/ifconfig + - /usr/sbin/iwlist + action: member + state: absent + +# Ensure sudocmd-group 'network' is absent +- ipasudocmdgroup: + ipaadmin_password: SomeADMINpassword + name: 
network + action: member + state: absent +""" + +RETURN = """ +""" + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, \ + gen_add_list, gen_intersection_list, ipalib_errors +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_sudocmdgroup(module, name): + args = {"all": True} + + try: + _result = module.ipa_command("sudocmdgroup_show", name, args) + except ipalib_errors.NotFound: + return None + return _result["result"] + + +def gen_args(description, nomembers): + _args = {} + if description is not None: + _args["description"] = description + if nomembers is not None: + _args["nomembers"] = nomembers + + return _args + + +def gen_member_args(sudocmd): + _args = {} + if sudocmd is not None: + _args["member_sudocmd"] = sudocmd + + return _args + + +def main(): + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["cn"], + required=True), + # present + description=dict(type="str", default=None), + nomembers=dict(required=False, type='bool', default=None), + sudocmd=dict(required=False, type='list', elements="str", + default=None), + action=dict(type="str", default="sudocmdgroup", + choices=["member", "sudocmdgroup"]), + # state + state=dict(type="str", default="present", + choices=["present", "absent"]), + ), + supports_check_mode=True, + ) + + ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + + # present + description = ansible_module.params_get("description") + nomembers = ansible_module.params_get("nomembers") + sudocmd = ansible_module.params_get("sudocmd") + action = ansible_module.params_get("action") + # state + state = ansible_module.params_get("state") + + # Check parameters + invalid = [] + + if state == 
"present": + if len(names) != 1: + ansible_module.fail_json( + msg="Only one sudocmdgroup can be added at a time.") + if action == "member": + invalid = ["description", "nomembers"] + + if state == "absent": + if len(names) < 1: + ansible_module.fail_json( + msg="No name given.") + invalid = ["description", "nomembers"] + if action == "sudocmdgroup": + invalid.extend(["sudocmd"]) + + ansible_module.params_fail_used_invalid(invalid, state, action) + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Connect to IPA API + with ansible_module.ipa_connect(): + + commands = [] + + for name in names: + # Make sure hostgroup exists + res_find = find_sudocmdgroup(ansible_module, name) + res_find_orig = res_find + + # Create command + if state == "present": + # Generate args + args = gen_args(description, nomembers) + + if action == "sudocmdgroup": + attr_before, attr_after = {}, {} + sudocmd_add, sudocmd_del = [], [] + + # Found the hostgroup + if res_find is not None: + # For all settings is args, check if there are + # different settings in the find result. 
+ # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([name, "sudocmdgroup_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find) + else: + commands.append([name, "sudocmdgroup_add", args]) + attr_before, attr_after = {}, args + # Set res_find to empty dict for next step + res_find = {} + + member_args = gen_member_args(sudocmd) + if not compare_args_ipa(ansible_module, member_args, + res_find): + # Generate addition and removal lists + sudocmd_add, sudocmd_del = \ + gen_add_del_lists( + sudocmd, + res_find.get("member_sudocmd")) + + # Add members + if len(sudocmd_add) > 0: + commands.append([name, "sudocmdgroup_add_member", + { + "sudocmd": sudocmd_add + } + ]) + # Remove members + if len(sudocmd_del) > 0: + commands.append([name, + "sudocmdgroup_remove_member", + { + "sudocmd": sudocmd_del + } + ]) + + # Diff tracking + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "sudocmd", sudocmd_add, sudocmd_del, + (res_find_orig or {}).get("member_sudocmd")), + ) + diff_tracker.add_entry_diff(name, before, after) + + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No sudocmdgroup '%s'" % name) + + # Ensure members are present + sudocmd_add = gen_add_list( + sudocmd, res_find.get("member_sudocmd") or []) + if sudocmd_add: + commands.append([name, "sudocmdgroup_add_member", + {"sudocmd": sudocmd_add} + ]) + before, after = merge_diffs( + gen_member_diff( + "sudocmd", sudocmd_add, [], + res_find.get("member_sudocmd")), + ) + diff_tracker.add_entry_diff(name, before, after) + elif state == "absent": + if action == "sudocmdgroup": + if res_find is not None: + commands.append([name, "sudocmdgroup_del", {}]) + diff_tracker.add_entry_diff( + name, + {"state": "present"}, {"state": "absent"}) + + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No sudocmdgroup '%s'" % name) + + sudocmd_del = gen_intersection_list( + 
sudocmd, res_find.get("member_sudocmd") or []) + if sudocmd_del: + commands.append([name, "sudocmdgroup_remove_member", + {"sudocmd": sudocmd_del} + ]) + before, after = merge_diffs( + gen_member_diff( + "sudocmd", [], sudocmd_del, + res_find.get("member_sudocmd")), + ) + diff_tracker.add_entry_diff(name, before, after) + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + changed = ansible_module.execute_ipa_commands( + commands, fail_on_member_errors=True) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipasudorule.py b/plugins/modules/ipasudorule.py new file mode 100644 index 000000000..3b3c7871e --- /dev/null +++ b/plugins/modules/ipasudorule.py @@ -0,0 +1,1232 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Rafael Guterres Jeffman +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. 
+ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + +DOCUMENTATION = """ +--- +module: ipasudorule +short_description: Manage FreeIPA sudo rules +description: Manage FreeIPA sudo rules +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The sudorule name + type: list + elements: str + required: false + aliases: ["cn"] + sudorules: + description: The list of sudorule dicts. + type: list + elements: dict + suboptions: + name: + description: The sudorule name + type: list + elements: str + required: true + aliases: ["cn"] + description: + description: The sudorule description + type: str + required: false + user: + description: List of users assigned to the sudo rule. + type: list + elements: str + required: false + usercategory: + description: User category the sudo rule applies to + type: str + required: false + choices: ["all", ""] + aliases: ["usercat"] + group: + description: List of user groups assigned to the sudo rule. + type: list + elements: str + required: false + runasgroupcategory: + description: RunAs Group category applied to the sudo rule. + type: str + required: false + choices: ["all", ""] + aliases: ["runasgroupcat"] + runasusercategory: + description: RunAs User category applied to the sudorule. + type: str + required: false + choices: ["all", ""] + aliases: ["runasusercat"] + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + host: + description: List of host names assigned to this sudorule. + required: false + type: list + elements: str + hostgroup: + description: List of host groups assigned to this sudorule. + required: false + type: list + elements: str + hostcategory: + description: Host category the sudo rule applies to. 
+ type: str + required: false + choices: ["all", ""] + aliases: ["hostcat"] + allow_sudocmd: + description: List of allowed sudocmds assigned to this sudorule. + required: false + type: list + elements: str + allow_sudocmdgroup: + description: List of allowed sudocmd groups assigned to this sudorule. + required: false + type: list + elements: str + deny_sudocmd: + description: List of denied sudocmds assigned to this sudorule. + required: false + type: list + elements: str + deny_sudocmdgroup: + description: List of denied sudocmd groups assigned to this sudorule. + required: false + type: list + elements: str + cmdcategory: + description: Command category the sudo rule applies to + type: str + required: false + choices: ["all", ""] + aliases: ["cmdcat"] + order: + description: Order to apply this rule. + required: false + type: int + aliases: ["sudoorder"] + sudooption: + description: List of sudo options. + required: false + type: list + elements: str + aliases: ["options"] + runasuser: + description: List of users for Sudo to execute as. + required: false + type: list + elements: str + runasuser_group: + description: List of groups for Sudo to execute as. + required: false + type: list + elements: str + runasgroup: + description: List of groups for Sudo to execute as. + required: false + type: list + elements: str + hostmask: + description: Host masks of allowed hosts. + required: false + type: list + elements: str + description: + description: The sudorule description + type: str + required: false + user: + description: List of users assigned to the sudo rule. + type: list + elements: str + required: false + usercategory: + description: User category the sudo rule applies to + type: str + required: false + choices: ["all", ""] + aliases: ["usercat"] + group: + description: List of user groups assigned to the sudo rule. + type: list + elements: str + required: false + runasgroupcategory: + description: RunAs Group category applied to the sudo rule. 
+ type: str + required: false + choices: ["all", ""] + aliases: ["runasgroupcat"] + runasusercategory: + description: RunAs User category applied to the sudorule. + type: str + required: false + choices: ["all", ""] + aliases: ["runasusercat"] + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + host: + description: List of host names assigned to this sudorule. + required: false + type: list + elements: str + hostgroup: + description: List of host groups assigned to this sudorule. + required: false + type: list + elements: str + hostcategory: + description: Host category the sudo rule applies to. + type: str + required: false + choices: ["all", ""] + aliases: ["hostcat"] + allow_sudocmd: + description: List of allowed sudocmds assigned to this sudorule. + required: false + type: list + elements: str + allow_sudocmdgroup: + description: List of allowed sudocmd groups assigned to this sudorule. + required: false + type: list + elements: str + deny_sudocmd: + description: List of denied sudocmds assigned to this sudorule. + required: false + type: list + elements: str + deny_sudocmdgroup: + description: List of denied sudocmd groups assigned to this sudorule. + required: false + type: list + elements: str + cmdcategory: + description: Command category the sudo rule applies to + type: str + required: false + choices: ["all", ""] + aliases: ["cmdcat"] + order: + description: Order to apply this rule. + required: false + type: int + aliases: ["sudoorder"] + sudooption: + description: List of sudo options. + required: false + type: list + elements: str + aliases: ["options"] + runasuser: + description: List of users for Sudo to execute as. + required: false + type: list + elements: str + runasuser_group: + description: List of groups for Sudo to execute as. + required: false + type: list + elements: str + runasgroup: + description: List of groups for Sudo to execute as. 
+ required: false + type: list + elements: str + hostmask: + description: Host masks of allowed hosts. + required: false + type: list + elements: str + action: + description: Work on sudorule or member level + type: str + default: sudorule + choices: ["member", "sudorule"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent", "enabled", "disabled"] +author: + - Rafael Guterres Jeffman (@rjeffman) + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Ensure Sudo Rule tesrule1 is present +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + +# Ensure sudocmd is present in Sudo Rule +- ipasudorule: + ipaadmin_password: pass1234 + name: testrule1 + allow_sudocmd: + - /sbin/ifconfig + - /usr/bin/vim + action: member + state: absent + +# Ensure host server is present in Sudo Rule +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + host: server + action: member + +# Ensure hostgroup cluster is present in Sudo Rule +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + hostgroup: cluster + action: member + +# Ensure sudo rule for usercategory "all" is enabled +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: allusers + usercategory: all + state: enabled + +# Ensure sudo rule for hostcategory "all" is enabled +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: allhosts + hostcategory: all + state: enabled + +# Ensure sudo rule applies for hosts with hostmasks +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + hostmask: + - 192.168.122.1/24 + - 192.168.120.1/24 + +# Ensure sudorule 'runasuser' has 'ipasuers' group as runas users. 
+- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + runasuser_group: ipausers + action: member + +# Ensure Sudo Rule tesrule1 is absent +- ipasudorule: + ipaadmin_password: SomeADMINpassword + name: testrule1 + state: absent + +# Ensure multiple Sudo Rules are present using batch mode. +- ipasudorule: + ipaadmin_password: SomeADMINpassword + sudorules: + - name: testrule1 + hostmask: + - 192.168.122.1/24 + - name: testrule2 + hostcategory: all +""" + +RETURN = """ +""" + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, gen_add_list, \ + gen_intersection_list, api_get_domain, ensure_fqdn, netaddr, to_text, \ + ipalib_errors, convert_param_value_to_lowercase, EntryFactory +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs + + +def find_sudorule(module, name): + _args = { + "all": True, + } + + try: + _result = module.ipa_command("sudorule_show", name, _args) + except ipalib_errors.NotFound: + return None + return _result["result"] + + +def gen_args(entry): + """Generate args for sudorule.""" + _args = {} + + if entry.description is not None: + _args['description'] = entry.description + if entry.usercategory is not None: + _args['usercategory'] = entry.usercategory + if entry.hostcategory is not None: + _args['hostcategory'] = entry.hostcategory + if entry.cmdcategory is not None: + _args['cmdcategory'] = entry.cmdcategory + if entry.runasusercategory is not None: + _args['ipasudorunasusercategory'] = entry.runasusercategory + if entry.runasgroupcategory is not None: + _args['ipasudorunasgroupcategory'] = entry.runasgroupcategory + if entry.order is not None: + _args['sudoorder'] = entry.order + if entry.nomembers is not None: + _args['nomembers'] = entry.nomembers + + return _args + + +def init_ansible_module(): + """Initialize 
IPAAnsibleModule object for sudorule.""" + sudorule_spec = dict( + description=dict(required=False, type="str", default=None), + usercategory=dict(required=False, type="str", default=None, + choices=["all", ""], aliases=['usercat']), + hostcategory=dict(required=False, type="str", default=None, + choices=["all", ""], aliases=['hostcat']), + nomembers=dict(required=False, type='bool', default=None), + host=dict(required=False, type='list', elements="str", + default=None), + hostgroup=dict(required=False, type='list', elements="str", + default=None), + hostmask=dict(required=False, type='list', elements="str", + default=None), + user=dict(required=False, type='list', elements="str", + default=None), + group=dict(required=False, type='list', elements="str", + default=None), + allow_sudocmd=dict(required=False, type="list", elements="str", + default=None), + deny_sudocmd=dict(required=False, type="list", elements="str", + default=None), + allow_sudocmdgroup=dict(required=False, type="list", + elements="str", default=None), + deny_sudocmdgroup=dict(required=False, type="list", elements="str", + default=None), + cmdcategory=dict(required=False, type="str", default=None, + choices=["all", ""], aliases=['cmdcat']), + runasusercategory=dict(required=False, type="str", default=None, + choices=["all", ""], + aliases=['runasusercat']), + runasgroupcategory=dict(required=False, type="str", default=None, + choices=["all", ""], + aliases=['runasgroupcat']), + runasuser=dict(required=False, type="list", elements="str", + default=None), + runasgroup=dict(required=False, type="list", elements="str", + default=None), + runasuser_group=dict(required=False, type="list", elements="str", + default=None), + order=dict(type="int", required=False, aliases=['sudoorder']), + sudooption=dict(required=False, type='list', elements="str", + default=None, aliases=["options"]), + ) + + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", 
aliases=["cn"],
+                      required=False),
+            sudorules=dict(
+                type="list",
+                default=None,
+                options=dict(
+                    # name of the sudorule
+                    name=dict(type="str", required=True, aliases=["cn"]),
+                    # sudorule specific parameters
+                    **sudorule_spec
+                ),
+                elements='dict',
+                required=False,
+            ),
+            # action
+            action=dict(type="str", default="sudorule",
+                        choices=["member", "sudorule"]),
+            # state
+            state=dict(type="str", default="present",
+                       choices=["present", "absent",
+                                "enabled", "disabled"]),
+            # Specific parameters for simple use case
+            **sudorule_spec
+        ),
+        mutually_exclusive=[["name", "sudorules"]],
+        required_one_of=[["name", "sudorules"]],
+        supports_check_mode=True,
+    )
+
+    ansible_module._ansible_debug = True
+    return ansible_module
+
+
+def convert_list_of_hostmask(hostmasks):
+    """Ensure all hostmasks in hostmasks are CIDR values."""
+    return [
+        to_text(netaddr.IPNetwork(mask).cidr)
+        for mask in (
+            hostmasks if isinstance(hostmasks, (list, tuple))
+            else [hostmasks]
+        )
+    ]
+
+
+def convert_list_of_hostnames(hostnames):
+    """Ensure all hostnames in hostnames are lowercase FQDN."""
+    return list(
+        set(
+            ensure_fqdn(value.lower(), api_get_domain())
+            for value in (
+                hostnames if isinstance(hostnames, (list, tuple))
+                else [hostnames]
+            )
+        )
+    )
+
+
+def validate_entry(module, entry, state, action):
+    """Ensure entry object is valid."""
+    if state == "present" and action == "sudorule":
+        # Ensure the entry is valid for state:present, action:sudorule.
+ if entry.hostcategory == 'all' and any([entry.host, entry.hostgroup]): + module.fail_json( + msg="Hosts cannot be added when host category='all'" + ) + if entry.usercategory == 'all' and any([entry.user, entry.group]): + module.fail_json( + msg="Users cannot be added when user category='all'" + ) + if entry.cmdcategory == 'all' \ + and any([entry.allow_sudocmd, entry.allow_sudocmdgroup]): + module.fail_json( + msg="Commands cannot be added when command category='all'" + ) + return entry + + +def main(): + ansible_module = init_ansible_module() + # Get parameters + # general + names = ansible_module.params_get("name") + # sudorules = ansible_module.params_get("sudorules") + # action + action = ansible_module.params_get("action") + # state + state = ansible_module.params_get("state") + + # Check parameters + invalid = [] + + if state == "present": + if names is not None and len(names) != 1: + ansible_module.fail_json( + msg="Only one sudorule can be added at a time using 'name'.") + if action == "member": + invalid = ["description", "usercategory", "hostcategory", + "cmdcategory", "runasusercategory", + "runasgroupcategory", "order", "nomembers"] + + elif state == "absent": + invalid = ["description", "usercategory", "hostcategory", + "cmdcategory", "runasusercategory", + "runasgroupcategory", "nomembers", "order"] + if action == "sudorule": + invalid.extend(["host", "hostgroup", "hostmask", "user", "group", + "runasuser", "runasgroup", "allow_sudocmd", + "allow_sudocmdgroup", "deny_sudocmd", + "deny_sudocmdgroup", "sudooption", + "runasuser_group"]) + + elif state in ["enabled", "disabled"]: + if action == "member": + ansible_module.fail_json( + msg="Action member can not be used with states enabled and " + "disabled") + invalid = ["description", "usercategory", "hostcategory", + "cmdcategory", "runasusercategory", "runasgroupcategory", + "nomembers", "nomembers", "host", "hostgroup", "hostmask", + "user", "group", "allow_sudocmd", "allow_sudocmdgroup", + 
"deny_sudocmd", "deny_sudocmdgroup", "runasuser", + "runasgroup", "order", "sudooption", "runasuser_group"] + else: + ansible_module.fail_json(msg="Invalid state '%s'" % state) + + # Init + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Factory parameters + params = { + "name": {}, + "description": {}, + "cmdcategory": {}, + "usercategory": {}, + "hostcategory": {}, + "runasusercategory": {}, + "runasgroupcategory": {}, + "host": {"convert": [convert_list_of_hostnames]}, + "hostgroup": {"convert": [convert_param_value_to_lowercase]}, + "hostmask": {"convert": [convert_list_of_hostmask]}, + "user": {"convert": [convert_param_value_to_lowercase]}, + "group": {"convert": [convert_param_value_to_lowercase]}, + "allow_sudocmd": {}, + "allow_sudocmdgroup": {"convert": [convert_param_value_to_lowercase]}, + "deny_sudocmd": {}, + "deny_sudocmdgroup": {"convert": [convert_param_value_to_lowercase]}, + "sudooption": {}, + "order": {}, + "runasuser": {"convert": [convert_param_value_to_lowercase]}, + "runasuser_group": {"convert": [convert_param_value_to_lowercase]}, + "runasgroup": {"convert": [convert_param_value_to_lowercase]}, + "nomembers": {}, + } + + # Connect to IPA API + with ansible_module.ipa_connect(): + commands = [] + + # Creating factory after connect as host conversion + # requires 'api_get_domain()' to be available + entry_factory = EntryFactory( + ansible_module, + invalid, + "sudorules", + params, + validate_entry=validate_entry, + state=state, + action=action, + ) + + for entry in entry_factory: + host_add, host_del = [], [] + user_add, user_del = [], [] + group_add, group_del = [], [] + hostgroup_add, hostgroup_del = [], [] + hostmask_add, hostmask_del = [], [] + allow_cmd_add, allow_cmd_del = [], [] + allow_cmdgroup_add, allow_cmdgroup_del = [], [] + deny_cmd_add, deny_cmd_del = [], [] + deny_cmdgroup_add, deny_cmdgroup_del = [], [] + sudooption_add, sudooption_del = [], [] + runasuser_add, runasuser_del = [], [] + 
runasuser_group_add, runasuser_group_del = [], [] + runasgroup_add, runasgroup_del = [], [] + + # Try to retrieve sudorule + res_find = find_sudorule(ansible_module, entry.name) + res_find_orig = res_find + + # Fail if sudorule must exist but is not found + if ( + (state in ["enabled", "disabled"] or action == "member") + and res_find is None + ): + ansible_module.fail_json(msg="No sudorule '%s'" % entry.name) + + # Create command + if state == "present": + # Generate args + args = gen_args(entry) + attr_before, attr_after = {}, {} + if action == "sudorule": + # Found the sudorule + if res_find is not None: + # Remove empty usercategory, hostcategory, + # cmdcaterory, runasusercategory and hostcategory + # from args if "" and if the category is not in the + # sudorule. The empty string is used to reset the + # category. + if ( + "usercategory" in args + and args["usercategory"] == "" + and "usercategory" not in res_find + ): + del args["usercategory"] + if ( + "hostcategory" in args + and args["hostcategory"] == "" + and "hostcategory" not in res_find + ): + del args["hostcategory"] + if ( + "cmdcategory" in args + and args["cmdcategory"] == "" + and "cmdcategory" not in res_find + ): + del args["cmdcategory"] + if ( + "ipasudorunasusercategory" in args + and args["ipasudorunasusercategory"] == "" + and "ipasudorunasusercategory" not in res_find + ): + del args["ipasudorunasusercategory"] + if ( + "ipasudorunasgroupcategory" in args + and args["ipasudorunasgroupcategory"] == "" + and "ipasudorunasgroupcategory" not in res_find + ): + del args["ipasudorunasgroupcategory"] + + # For all settings is args, check if there are + # different settings in the find result. 
+ # If yes: modify + if not compare_args_ipa(ansible_module, args, + res_find): + commands.append([entry.name, "sudorule_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find) + else: + commands.append([entry.name, "sudorule_add", args]) + attr_before, attr_after = {}, args + # Set res_find to empty dict for next step + res_find = {} + + # Generate addition and removal lists + host_add, host_del = gen_add_del_lists( + entry.host, ( + list(res_find.get('memberhost_host', [])) + + list(res_find.get('externalhost', [])) + ) + ) + + hostgroup_add, hostgroup_del = gen_add_del_lists( + entry.hostgroup, + res_find.get('memberhost_hostgroup', []) + ) + + hostmask_add, hostmask_del = gen_add_del_lists( + entry.hostmask, res_find.get('hostmask', [])) + + user_add, user_del = gen_add_del_lists( + entry.user, ( + list(res_find.get('memberuser_user', [])) + + list(res_find.get('externaluser', [])) + ) + ) + + group_add, group_del = gen_add_del_lists( + entry.group, res_find.get('memberuser_group', [])) + + allow_cmd_add, allow_cmd_del = gen_add_del_lists( + entry.allow_sudocmd, + res_find.get('memberallowcmd_sudocmd', [])) + + allow_cmdgroup_add, allow_cmdgroup_del = gen_add_del_lists( + entry.allow_sudocmdgroup, + res_find.get('memberallowcmd_sudocmdgroup', [])) + + deny_cmd_add, deny_cmd_del = gen_add_del_lists( + entry.deny_sudocmd, + res_find.get('memberdenycmd_sudocmd', [])) + + deny_cmdgroup_add, deny_cmdgroup_del = gen_add_del_lists( + entry.deny_sudocmdgroup, + res_find.get('memberdenycmd_sudocmdgroup', [])) + + sudooption_add, sudooption_del = gen_add_del_lists( + entry.sudooption, res_find.get('ipasudoopt', [])) + + # runasuser attribute can be used with both IPA and + # non-IPA (external) users. IPA will handle the correct + # attribute to properly store data, so we need to compare + # the provided list against both users and external + # users list. 
+ runasuser_add, runasuser_del = gen_add_del_lists( + entry.runasuser, ( + list(res_find.get('ipasudorunas_user', [])) + + list(res_find.get('ipasudorunasextuser', [])) + ) + ) + runasuser_group_add, runasuser_group_del = ( + gen_add_del_lists( + entry.runasuser_group, + res_find.get('ipasudorunas_group', []) + ) + ) + + # runasgroup attribute can be used with both IPA and + # non-IPA (external) groups. IPA will handle the correct + # attribute to properly store data, so we need to compare + # the provided list against both groups and external + # groups list. + runasgroup_add, runasgroup_del = gen_add_del_lists( + entry.runasgroup, + ( + list(res_find.get('ipasudorunasgroup_group', [])) + + list(res_find.get('ipasudorunasextgroup', [])) + ) + ) + + elif action == "member": + # Generate add lists for host, hostgroup, user, group, + # allow_sudocmd, allow_sudocmdgroup, deny_sudocmd, + # deny_sudocmdgroup, sudooption, runasuser, runasgroup + # and res_find to only try to add the items that not in + # the sudorule already + if entry.host is not None: + host_add = gen_add_list( + entry.host, ( + list(res_find.get("memberhost_host", [])) + + list(res_find.get("externalhost", [])) + ) + ) + if entry.hostgroup is not None: + hostgroup_add = gen_add_list( + entry.hostgroup, + res_find.get("memberhost_hostgroup") + ) + if entry.hostmask is not None: + hostmask_add = gen_add_list( + entry.hostmask, res_find.get("hostmask")) + if entry.user is not None: + user_add = gen_add_list( + entry.user, ( + list(res_find.get('memberuser_user', [])) + + list(res_find.get('externaluser', [])) + ) + ) + if entry.group is not None: + group_add = gen_add_list( + entry.group, res_find.get("memberuser_group")) + if entry.allow_sudocmd is not None: + allow_cmd_add = gen_add_list( + entry.allow_sudocmd, + res_find.get("memberallowcmd_sudocmd") + ) + if entry.allow_sudocmdgroup is not None: + allow_cmdgroup_add = gen_add_list( + entry.allow_sudocmdgroup, + 
res_find.get("memberallowcmd_sudocmdgroup") + ) + if entry.deny_sudocmd is not None: + deny_cmd_add = gen_add_list( + entry.deny_sudocmd, + res_find.get("memberdenycmd_sudocmd") + ) + if entry.deny_sudocmdgroup is not None: + deny_cmdgroup_add = gen_add_list( + entry.deny_sudocmdgroup, + res_find.get("memberdenycmd_sudocmdgroup") + ) + if entry.sudooption is not None: + sudooption_add = gen_add_list( + entry.sudooption, res_find.get("ipasudoopt")) + # runasuser attribute can be used with both IPA and + # non-IPA (external) users, so we need to compare + # the provided list against both users and external + # users list. + if entry.runasuser is not None: + runasuser_add = gen_add_list( + entry.runasuser, + (list(res_find.get('ipasudorunas_user', [])) + + list(res_find.get('ipasudorunasextuser', []))) + ) + if entry.runasuser_group is not None: + runasuser_group_add = gen_add_list( + entry.runasuser_group, + res_find.get('ipasudorunas_group', []) + ) + # runasgroup attribute can be used with both IPA and + # non-IPA (external) groups, so we need to compare + # the provided list against both users and external + # groups list. 
+ if entry.runasgroup is not None: + runasgroup_add = gen_add_list( + entry.runasgroup, + (list(res_find.get("ipasudorunasgroup_group", [])) + + list(res_find.get("ipasudorunasextgroup", []))) + ) + + elif state == "absent": + if action == "sudorule": + if res_find is not None: + commands.append([entry.name, "sudorule_del", {}]) + + elif action == "member": + # Generate intersection lists for host, hostgroup, user, + # group, allow_sudocmd, allow_sudocmdgroup, deny_sudocmd + # deny_sudocmdgroup, sudooption, runasuser, runasgroup + # and res_find to only try to remove the items that are + # in sudorule + if entry.host is not None: + host_del = gen_intersection_list( + entry.host, ( + list(res_find.get("memberhost_host", [])) + + list(res_find.get("externalhost", [])) + ) + ) + + if entry.hostgroup is not None: + hostgroup_del = gen_intersection_list( + entry.hostgroup, + res_find.get("memberhost_hostgroup") + ) + + if entry.hostmask is not None: + hostmask_del = gen_intersection_list( + entry.hostmask, res_find.get("hostmask")) + + if entry.user is not None: + user_del = gen_intersection_list( + entry.user, ( + list(res_find.get('memberuser_user', [])) + + list(res_find.get('externaluser', [])) + ) + ) + + if entry.group is not None: + group_del = gen_intersection_list( + entry.group, res_find.get("memberuser_group")) + + if entry.allow_sudocmd is not None: + allow_cmd_del = gen_intersection_list( + entry.allow_sudocmd, + res_find.get("memberallowcmd_sudocmd") + ) + if entry.allow_sudocmdgroup is not None: + allow_cmdgroup_del = gen_intersection_list( + entry.allow_sudocmdgroup, + res_find.get("memberallowcmd_sudocmdgroup") + ) + if entry.deny_sudocmd is not None: + deny_cmd_del = gen_intersection_list( + entry.deny_sudocmd, + res_find.get("memberdenycmd_sudocmd") + ) + if entry.deny_sudocmdgroup is not None: + deny_cmdgroup_del = gen_intersection_list( + entry.deny_sudocmdgroup, + res_find.get("memberdenycmd_sudocmdgroup") + ) + if entry.sudooption is not None: + 
sudooption_del = gen_intersection_list( + entry.sudooption, res_find.get("ipasudoopt")) + # runasuser attribute can be used with both IPA and + # non-IPA (external) users, so we need to compare + # the provided list against both users and external + # users list. + if entry.runasuser is not None: + runasuser_del = gen_intersection_list( + entry.runasuser, ( + list(res_find.get('ipasudorunas_user', [])) + + list(res_find.get('ipasudorunasextuser', [])) + ) + ) + if entry.runasuser_group is not None: + runasuser_group_del = gen_intersection_list( + entry.runasuser_group, + res_find.get('ipasudorunas_group', []) + ) + # runasgroup attribute can be used with both IPA and + # non-IPA (external) groups, so we need to compare + # the provided list against both groups and external + # groups list. + if entry.runasgroup is not None: + runasgroup_del = gen_intersection_list( + entry.runasgroup, + ( + list(res_find.get( + "ipasudorunasgroup_group", [])) + + list(res_find.get( + "ipasudorunasextgroup", [])) + ) + ) + + elif state == "enabled": + # sudorule_enable is not failing on an enabled sudorule + # Therefore it is needed to have a look at the ipaenabledflag + # in res_find. + # FreeIPA 4.9.10+ and 4.10 use proper mapping for + # boolean values, so we need to convert it to str + # for comparison. + # See: https://github.com/freeipa/freeipa/pull/6294 + enabled_flag = str(res_find.get("ipaenabledflag", [False])[0]) + if enabled_flag.upper() != "TRUE": + commands.append([entry.name, "sudorule_enable", {}]) + diff_tracker.add_entry_diff( + entry.name, + {"enabled": False}, {"enabled": True}) + + elif state == "disabled": + # sudorule_disable is not failing on an disabled sudorule + # Therefore it is needed to have a look at the ipaenabledflag + # in res_find. + # FreeIPA 4.9.10+ and 4.10 use proper mapping for + # boolean values, so we need to convert it to str + # for comparison. 
+ # See: https://github.com/freeipa/freeipa/pull/6294 + enabled_flag = str(res_find.get("ipaenabledflag", [False])[0]) + if enabled_flag.upper() != "FALSE": + commands.append([entry.name, "sudorule_disable", {}]) + diff_tracker.add_entry_diff( + entry.name, + {"enabled": True}, {"enabled": False}) + + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + # Manage members. + # Manage hosts and hostgroups + if any([host_add, hostgroup_add, hostmask_add]): + params = {"host": host_add, "hostgroup": hostgroup_add} + # An empty Hostmask cannot be used, or IPA API will fail. + if hostmask_add: + params["hostmask"] = hostmask_add + commands.append([entry.name, "sudorule_add_host", params]) + + if any([host_del, hostgroup_del, hostmask_del]): + params = {"host": host_del, "hostgroup": hostgroup_del} + # An empty Hostmask cannot be used, or IPA API will fail. + if hostmask_del: + params["hostmask"] = hostmask_del + commands.append([entry.name, "sudorule_remove_host", params]) + + # Manage users and groups + if user_add or group_add: + commands.append([ + entry.name, "sudorule_add_user", + {"user": user_add, "group": group_add} + ]) + if user_del or group_del: + commands.append([ + entry.name, "sudorule_remove_user", + {"user": user_del, "group": group_del} + ]) + + # Manage commands allowed + if allow_cmd_add or allow_cmdgroup_add: + commands.append([ + entry.name, "sudorule_add_allow_command", + { + "sudocmd": allow_cmd_add, + "sudocmdgroup": allow_cmdgroup_add, + } + ]) + if allow_cmd_del or allow_cmdgroup_del: + commands.append([ + entry.name, "sudorule_remove_allow_command", + { + "sudocmd": allow_cmd_del, + "sudocmdgroup": allow_cmdgroup_del + } + ]) + # Manage commands denied + if deny_cmd_add or deny_cmdgroup_add: + commands.append([ + entry.name, "sudorule_add_deny_command", + { + "sudocmd": deny_cmd_add, + "sudocmdgroup": deny_cmdgroup_add, + } + ]) + if deny_cmd_del or deny_cmdgroup_del: + commands.append([ + entry.name, 
"sudorule_remove_deny_command", + { + "sudocmd": deny_cmd_del, + "sudocmdgroup": deny_cmdgroup_del + } + ]) + # Manage RunAS users + if runasuser_add or runasuser_group_add: + # Can't use empty lists with command "sudorule_add_runasuser". + _args = {} + if runasuser_add: + _args["user"] = runasuser_add + if runasuser_group_add: + _args["group"] = runasuser_group_add + commands.append([entry.name, "sudorule_add_runasuser", _args]) + if runasuser_del or runasuser_group_del: + commands.append([ + entry.name, + "sudorule_remove_runasuser", + {"user": runasuser_del, "group": runasuser_group_del} + ]) + + # Manage RunAS Groups + if runasgroup_add: + commands.append([ + entry.name, "sudorule_add_runasgroup", + {"group": runasgroup_add} + ]) + if runasgroup_del: + commands.append([ + entry.name, "sudorule_remove_runasgroup", + {"group": runasgroup_del} + ]) + # Manage sudo options + if sudooption_add: + for option in sudooption_add: + commands.append([ + entry.name, "sudorule_add_option", + {"ipasudoopt": option} + ]) + if sudooption_del: + for option in sudooption_del: + commands.append([ + entry.name, "sudorule_remove_option", + {"ipasudoopt": option} + ]) + + # Diff tracking + _orig = res_find_orig or {} + if state == "present": + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "host", host_add, host_del, + list(_orig.get("memberhost_host", [])) + + list(_orig.get("externalhost", []))), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("memberhost_hostgroup")), + gen_member_diff( + "hostmask", hostmask_add, hostmask_del, + _orig.get("hostmask")), + gen_member_diff( + "user", user_add, user_del, + list(_orig.get("memberuser_user", [])) + + list(_orig.get("externaluser", []))), + gen_member_diff( + "group", group_add, group_del, + _orig.get("memberuser_group")), + gen_member_diff( + "allow_sudocmd", allow_cmd_add, allow_cmd_del, + _orig.get("memberallowcmd_sudocmd")), + gen_member_diff( + "allow_sudocmdgroup", + 
allow_cmdgroup_add, allow_cmdgroup_del, + _orig.get("memberallowcmd_sudocmdgroup")), + gen_member_diff( + "deny_sudocmd", deny_cmd_add, deny_cmd_del, + _orig.get("memberdenycmd_sudocmd")), + gen_member_diff( + "deny_sudocmdgroup", + deny_cmdgroup_add, deny_cmdgroup_del, + _orig.get("memberdenycmd_sudocmdgroup")), + gen_member_diff( + "sudooption", sudooption_add, sudooption_del, + _orig.get("ipasudoopt")), + gen_member_diff( + "runasuser", runasuser_add, runasuser_del, + list(_orig.get("ipasudorunas_user", [])) + + list(_orig.get("ipasudorunasextuser", []))), + gen_member_diff( + "runasuser_group", + runasuser_group_add, runasuser_group_del, + _orig.get("ipasudorunas_group")), + gen_member_diff( + "runasgroup", runasgroup_add, runasgroup_del, + list(_orig.get("ipasudorunasgroup_group", [])) + + list(_orig.get("ipasudorunasextgroup", []))), + ) + diff_tracker.add_entry_diff(entry.name, before, after) + elif state == "absent": + if action == "sudorule": + if res_find_orig is not None: + diff_tracker.add_entry_diff( + entry.name, + {"state": "present"}, {"state": "absent"}) + elif action == "member": + before, after = merge_diffs( + gen_member_diff( + "host", host_add, host_del, + list(_orig.get("memberhost_host", [])) + + list(_orig.get("externalhost", []))), + gen_member_diff( + "hostgroup", hostgroup_add, hostgroup_del, + _orig.get("memberhost_hostgroup")), + gen_member_diff( + "hostmask", hostmask_add, hostmask_del, + _orig.get("hostmask")), + gen_member_diff( + "user", user_add, user_del, + list(_orig.get("memberuser_user", [])) + + list(_orig.get("externaluser", []))), + gen_member_diff( + "group", group_add, group_del, + _orig.get("memberuser_group")), + gen_member_diff( + "allow_sudocmd", allow_cmd_add, allow_cmd_del, + _orig.get("memberallowcmd_sudocmd")), + gen_member_diff( + "allow_sudocmdgroup", + allow_cmdgroup_add, allow_cmdgroup_del, + _orig.get("memberallowcmd_sudocmdgroup")), + gen_member_diff( + "deny_sudocmd", deny_cmd_add, deny_cmd_del, + 
_orig.get("memberdenycmd_sudocmd")), + gen_member_diff( + "deny_sudocmdgroup", + deny_cmdgroup_add, deny_cmdgroup_del, + _orig.get("memberdenycmd_sudocmdgroup")), + gen_member_diff( + "sudooption", sudooption_add, sudooption_del, + _orig.get("ipasudoopt")), + gen_member_diff( + "runasuser", runasuser_add, runasuser_del, + list(_orig.get("ipasudorunas_user", [])) + + list(_orig.get("ipasudorunasextuser", []))), + gen_member_diff( + "runasuser_group", + runasuser_group_add, runasuser_group_del, + _orig.get("ipasudorunas_group")), + gen_member_diff( + "runasgroup", runasgroup_add, runasgroup_del, + list(_orig.get("ipasudorunasgroup_group", [])) + + list(_orig.get("ipasudorunasextgroup", []))), + ) + diff_tracker.add_entry_diff(entry.name, before, after) + + # Execute commands + + changed = ansible_module.execute_ipa_commands( + commands, batch=True, fail_on_member_errors=True) + + # Done + + _exit_kwargs = dict(exit_args, **diff_tracker.build_diff()) + ansible_module.exit_json(changed=changed, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ipauser.py b/plugins/modules/ipauser.py new file mode 100644 index 000000000..a1f32da45 --- /dev/null +++ b/plugins/modules/ipauser.py @@ -0,0 +1,1888 @@ +# -*- coding: utf-8 -*- + +# Authors: +# Thomas Woerner +# +# Copyright (C) 2019-2022 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see . +# +# Temporary copy with --diff support. Remove once +# https://github.com/freeipa/ansible-freeipa/pull/1415 +# is merged and released. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.0", + "supported_by": "community", + "status": ["preview"], +} + +DOCUMENTATION = """ +--- +module: ipauser +short_description: Manage FreeIPA users +description: Manage FreeIPA users +extends_documentation_fragment: + - ipamodule_base_docs +options: + name: + description: The list of users (internally uid). + type: list + elements: str + required: false + aliases: ["login"] + users: + description: The list of user dicts (internally uid). + type: list + elements: dict + suboptions: + name: + description: The user (internally uid). + type: str + required: true + aliases: ["login"] + first: + description: The first name. Required if user does not exist. + type: str + required: false + aliases: ["givenname"] + last: + description: The last name. Required if user doesnot exst. 
+ type: str + required: false + aliases: ["sn"] + fullname: + description: The full name + type: str + required: false + aliases: ["cn"] + displayname: + description: The display name + type: str + required: false + initials: + description: Initials + type: str + required: false + homedir: + description: The home directory + type: str + required: false + gecos: + description: The GECOS + type: str + required: false + shell: + description: The login shell + type: str + required: false + aliases: ["loginshell"] + email: + description: List of email addresses + type: list + elements: str + required: false + principal: + description: The kerberos principal + type: list + elements: str + required: false + aliases: ["principalname", "krbprincipalname"] + principalexpiration: + description: | + The kerberos principal expiration date + (possible formats: YYYYMMddHHmmssZ, YYYY-MM-ddTHH:mm:ssZ, + YYYY-MM-ddTHH:mmZ, YYYY-MM-ddZ, YYYY-MM-dd HH:mm:ssZ, + YYYY-MM-dd HH:mmZ) The trailing 'Z' can be skipped. + type: str + required: false + aliases: ["krbprincipalexpiration"] + passwordexpiration: + description: | + The kerberos password expiration date (FreeIPA-4.7+) + (possible formats: YYYYMMddHHmmssZ, YYYY-MM-ddTHH:mm:ssZ, + YYYY-MM-ddTHH:mmZ, YYYY-MM-ddZ, YYYY-MM-dd HH:mm:ssZ, + YYYY-MM-dd HH:mmZ) The trailing 'Z' can be skipped. + Only usable with IPA versions 4.7 and up. 
+ type: str + required: false + aliases: ["krbpasswordexpiration"] + password: + description: The user password + type: str + required: false + random: + description: Generate a random user password + required: false + type: bool + uid: + description: User ID Number (system will assign one if not provided) + type: int + required: false + aliases: ["uidnumber"] + gid: + description: Group ID Number + type: int + required: false + aliases: ["gidnumber"] + street: + description: Street address + type: str + required: false + city: + description: City + type: str + required: false + userstate: + description: State/Province + type: str + required: false + aliases: ["st"] + postalcode: + description: Postalcode/ZIP + type: str + required: false + aliases: ["zip"] + phone: + description: List of telephone numbers + type: list + elements: str + required: false + aliases: ["telephonenumber"] + mobile: + description: List of mobile telephone numbers + type: list + elements: str + required: false + pager: + description: List of pager numbers + type: list + elements: str + required: false + fax: + description: List of fax numbers + type: list + elements: str + required: false + aliases: ["facsimiletelephonenumber"] + orgunit: + description: Org. Unit + type: str + required: false + aliases: ["ou"] + title: + description: The job title + type: str + required: false + manager: + description: List of managers + type: list + elements: str + required: false + carlicense: + description: List of car licenses + type: list + elements: str + required: false + sshpubkey: + description: List of SSH public keys + required: false + type: list + elements: str + aliases: ["ipasshpubkey"] + userauthtype: + description: + List of supported user authentication types + Use empty string to reset userauthtype to the initial value. 
+ type: list + elements: str + choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", + "passkey", ""] + required: false + aliases: ["ipauserauthtype"] + userclass: + description: + - User category + - (semantics placed on this attribute are for local interpretation) + type: list + elements: str + required: false + aliases: ["class"] + radius: + description: RADIUS proxy configuration + type: str + required: false + aliases: ["ipatokenradiusconfiglink"] + radiususer: + description: RADIUS proxy username + type: str + required: false + aliases: ["radiususername", "ipatokenradiususername"] + departmentnumber: + description: Department Number + type: list + elements: str + required: false + employeenumber: + description: Employee Number + type: str + required: false + employeetype: + description: Employee Type + type: str + required: false + smb_logon_script: + description: SMB logon script path + type: str + required: false + aliases: ["ipantlogonscript"] + smb_profile_path: + description: SMB profile path + type: str + required: false + aliases: ["ipantprofilepath"] + smb_home_dir: + description: SMB Home Directory + type: str + required: false + aliases: ["ipanthomedirectory"] + smb_home_drive: + description: SMB Home Directory Drive + type: str + required: false + choices: [ + 'A:', 'B:', 'C:', 'D:', 'E:', 'F:', 'G:', 'H:', 'I:', 'J:', + 'K:', 'L:', 'M:', 'N:', 'O:', 'P:', 'Q:', 'R:', 'S:', 'T:', + 'U:', 'V:', 'W:', 'X:', 'Y:', 'Z:', '' + ] + aliases: ["ipanthomedirectorydrive"] + preferredlanguage: + description: Preferred Language + type: str + required: false + idp: + description: External IdP configuration + type: str + required: false + aliases: ["ipaidpconfiglink"] + idp_user_id: + description: A string that identifies the user at external IdP + type: str + required: false + aliases: ["ipaidpsub"] + certificate: + description: List of base-64 encoded user certificates + type: list + elements: str + required: false + aliases: ["usercertificate"] 
+ certmapdata: + description: + - List of certificate mappings + - Only usable with IPA versions 4.5 and up. + type: list + elements: dict + suboptions: + certificate: + description: Base-64 encoded user certificate + type: str + required: false + issuer: + description: Issuer of the certificate + type: str + required: false + subject: + description: Subject of the certificate + type: str + required: false + data: + description: Certmap data + type: str + required: false + required: false + noprivate: + description: Don't create user private group + required: false + type: bool + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + rename: + description: Rename the user object + required: false + type: str + aliases: ["new_name"] + required: false + first: + description: The first name. Required if user does not exist. + type: str + required: false + aliases: ["givenname"] + last: + description: The last name. Required if user doesnot exst. 
+ type: str + required: false + aliases: ["sn"] + fullname: + description: The full name + type: str + required: false + aliases: ["cn"] + displayname: + description: The display name + type: str + required: false + initials: + description: Initials + type: str + required: false + homedir: + description: The home directory + type: str + required: false + gecos: + description: The GECOS + type: str + required: false + shell: + description: The login shell + type: str + required: false + aliases: ["loginshell"] + email: + description: List of email addresses + type: list + elements: str + required: false + principal: + description: The kerberos principal + type: list + elements: str + required: false + aliases: ["principalname", "krbprincipalname"] + principalexpiration: + description: | + The kerberos principal expiration date + (possible formats: YYYYMMddHHmmssZ, YYYY-MM-ddTHH:mm:ssZ, + YYYY-MM-ddTHH:mmZ, YYYY-MM-ddZ, YYYY-MM-dd HH:mm:ssZ, + YYYY-MM-dd HH:mmZ) The trailing 'Z' can be skipped. + type: str + required: false + aliases: ["krbprincipalexpiration"] + passwordexpiration: + description: | + The kerberos password expiration date (FreeIPA-4.7+) + (possible formats: YYYYMMddHHmmssZ, YYYY-MM-ddTHH:mm:ssZ, + YYYY-MM-ddTHH:mmZ, YYYY-MM-ddZ, YYYY-MM-dd HH:mm:ssZ, + YYYY-MM-dd HH:mmZ) The trailing 'Z' can be skipped. + Only usable with IPA versions 4.7 and up. 
+ type: str + required: false + aliases: ["krbpasswordexpiration"] + password: + description: The user password + type: str + required: false + random: + description: Generate a random user password + required: false + type: bool + uid: + description: User ID Number (system will assign one if not provided) + type: int + required: false + aliases: ["uidnumber"] + gid: + description: Group ID Number + type: int + required: false + aliases: ["gidnumber"] + street: + description: Street address + type: str + required: false + city: + description: City + type: str + required: false + userstate: + description: State/Province + type: str + required: false + aliases: ["st"] + postalcode: + description: Postalcode/ZIP + type: str + required: false + aliases: ["zip"] + phone: + description: List of telephone numbers + type: list + elements: str + required: false + aliases: ["telephonenumber"] + mobile: + description: List of mobile telephone numbers + type: list + elements: str + required: false + pager: + description: List of pager numbers + type: list + elements: str + required: false + fax: + description: List of fax numbers + type: list + elements: str + required: false + aliases: ["facsimiletelephonenumber"] + orgunit: + description: Org. Unit + type: str + required: false + aliases: ["ou"] + title: + description: The job title + type: str + required: false + manager: + description: List of managers + type: list + elements: str + required: false + carlicense: + description: List of car licenses + type: list + elements: str + required: false + sshpubkey: + description: List of SSH public keys + required: false + type: list + elements: str + aliases: ["ipasshpubkey"] + userauthtype: + description: + List of supported user authentication types + Use empty string to reset userauthtype to the initial value. 
+ type: list + elements: str + choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", + "passkey", ""] + required: false + aliases: ["ipauserauthtype"] + userclass: + description: + - User category + - (semantics placed on this attribute are for local interpretation) + type: list + elements: str + required: false + aliases: ["class"] + radius: + description: RADIUS proxy configuration + type: str + required: false + aliases: ["ipatokenradiusconfiglink"] + radiususer: + description: RADIUS proxy username + type: str + required: false + aliases: ["radiususername", "ipatokenradiususername"] + departmentnumber: + description: Department Number + type: list + elements: str + required: false + employeenumber: + description: Employee Number + type: str + required: false + employeetype: + description: Employee Type + type: str + required: false + smb_logon_script: + description: SMB logon script path + type: str + required: false + aliases: ["ipantlogonscript"] + smb_profile_path: + description: SMB profile path + type: str + required: false + aliases: ["ipantprofilepath"] + smb_home_dir: + description: SMB Home Directory + type: str + required: false + aliases: ["ipanthomedirectory"] + smb_home_drive: + description: SMB Home Directory Drive + type: str + required: false + choices: [ + 'A:', 'B:', 'C:', 'D:', 'E:', 'F:', 'G:', 'H:', 'I:', 'J:', + 'K:', 'L:', 'M:', 'N:', 'O:', 'P:', 'Q:', 'R:', 'S:', 'T:', + 'U:', 'V:', 'W:', 'X:', 'Y:', 'Z:', '' + ] + aliases: ["ipanthomedirectorydrive"] + preferredlanguage: + description: Preferred Language + type: str + required: false + idp: + description: External IdP configuration + type: str + required: false + aliases: ["ipaidpconfiglink"] + idp_user_id: + description: A string that identifies the user at external IdP + type: str + required: false + aliases: ["ipaidpsub"] + certificate: + description: List of base-64 encoded user certificates + type: list + elements: str + required: false + aliases: ["usercertificate"] 
+ certmapdata: + description: + - List of certificate mappings + - Only usable with IPA versions 4.5 and up. + type: list + elements: dict + suboptions: + certificate: + description: Base-64 encoded user certificate + type: str + required: false + issuer: + description: Issuer of the certificate + type: str + required: false + subject: + description: Subject of the certificate + type: str + required: false + data: + description: Certmap data + type: str + required: false + required: false + noprivate: + description: Don't create user private group + required: false + type: bool + nomembers: + description: Suppress processing of membership attributes + required: false + type: bool + rename: + description: Rename the user object + required: false + type: str + aliases: ["new_name"] + preserve: + description: Delete a user, keeping the entry available for future use + required: false + type: bool + update_password: + description: + Set password for a user in present state only on creation or always + type: str + choices: ["always", "on_create"] + required: false + action: + description: Work on user or member level + type: str + default: "user" + choices: ["member", "user"] + state: + description: State to ensure + type: str + default: present + choices: ["present", "absent", + "enabled", "disabled", + "unlocked", "undeleted", + "renamed"] +author: + - Thomas Woerner (@t-woerner) +""" + +EXAMPLES = """ +# Create user pinky +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky + first: pinky + last: Acme + uid: 10001 + gid: 100 + phone: "+555123457" + email: pinky@acme.com + passwordexpiration: "2023-01-19 23:59:59" + password: "no-brain" + update_password: on_create + +# Create user brain +- ipauser: + ipaadmin_password: SomeADMINpassword + name: brain + first: brain + last: Acme + +# Create multiple users pinky and brain +- ipauser: + ipaadmin_password: SomeADMINpassword + users: + - name: pinky + first: pinky + last: Acme + - name: brain + first: brain + 
last: Acme + +# Delete user pinky, but preserved +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky + preserve: yes + state: absent + +# Undelete user pinky +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky + state: undeleted + +# Disable user pinky +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky,brain + state: disabled + +# Enable user pinky and brain +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky,brain + state: enabled + +# Remove but preserve user pinky +- ipauser: + ipaadmin_password: SomeADMINpassword + users: + - name: pinky + preserve: yes + state: absent + +# Remove user pinky and brain +- ipauser: + ipaadmin_password: SomeADMINpassword + name: pinky,brain + state: disabled + +# Ensure a user has SMB attributes +- ipauser: + ipaadmin_password: SomeADMINpassword + name: smbuser + first: SMB + last: User + smb_logon_script: N:\\logonscripts\\startup + smb_profile_path: \\\\server\\profiles\\some_profile + smb_home_dir: \\\\users\\home\\smbuser + smb_home_drive: "U:" + +# Rename an existing user +- ipauser: + ipaadmin_password: SomeADMINpassword + name: someuser + rename: anotheruser + state: renamed +""" + +RETURN = """ +user: + description: User dict with random password + returned: If random is yes and user did not exist or update_password is yes + type: dict + contains: + randompassword: + description: The generated random password + type: str + returned: | + If only one user is handled by the module without using users parameter + name: + description: The user name of the user that got a new random password + returned: | + If several users are handled by the module with the users parameter + type: dict + contains: + randompassword: + description: The generated random password + type: str + returned: always +""" + + +from ansible_collections.freeipa.ansible_freeipa.plugins.module_utils.ansible_freeipa_module import \ + IPAAnsibleModule, compare_args_ipa, gen_add_del_lists, convert_date, \ + 
encode_certificate, load_cert_from_str, DN_x500_text, to_text, \ + ipalib_errors, gen_add_list, gen_intersection_list, \ + convert_input_certificates, date_string +from ansible_collections.linuxfabrik.lfops.plugins.module_utils.ipa_diff import \ + IPADiffTracker, gen_args_diff, gen_member_diff, merge_diffs +from ansible.module_utils import six +if six.PY3: + unicode = str + + +def find_user(module, name): + _args = { + "all": True, + } + + try: + _result = module.ipa_command("user_show", name, _args).get("result") + except ipalib_errors.NotFound: + return None + + # Convert datetime to proper string representation + for _expkey in ["krbpasswordexpiration", "krbprincipalexpiration"]: + if _expkey in _result: + _result[_expkey] = [date_string(x) for x in _result[_expkey]] + # Transform each principal to a string + _result["krbprincipalname"] = [ + to_text(x) for x in (_result.get("krbprincipalname") or []) + ] + _result["usercertificate"] = [ + encode_certificate(x) for x in (_result.get("usercertificate") or []) + ] + return _result + + +def gen_args(first, last, fullname, displayname, initials, homedir, gecos, + shell, email, principalexpiration, passwordexpiration, password, + random, uid, gid, street, city, userstate, postalcode, phone, + mobile, pager, fax, orgunit, title, carlicense, sshpubkey, + userauthtype, userclass, radius, radiususer, departmentnumber, + employeenumber, employeetype, preferredlanguage, smb_logon_script, + smb_profile_path, smb_home_dir, smb_home_drive, idp, idp_user_id, + noprivate, nomembers): + # principal, manager, certificate and certmapdata are handled not in here + _args = {} + if first is not None: + _args["givenname"] = first + if last is not None: + _args["sn"] = last + if fullname is not None: + _args["cn"] = fullname + if displayname is not None: + _args["displayname"] = displayname + if initials is not None: + _args["initials"] = initials + if homedir is not None: + _args["homedirectory"] = homedir + if gecos is not None: + 
_args["gecos"] = gecos + if shell is not None: + _args["loginshell"] = shell + if email is not None and len(email) > 0: + _args["mail"] = email + if principalexpiration is not None: + _args["krbprincipalexpiration"] = principalexpiration + if passwordexpiration is not None: + _args["krbpasswordexpiration"] = passwordexpiration + if password is not None: + _args["userpassword"] = password + if random is not None: + _args["random"] = random + if uid is not None: + _args["uidnumber"] = to_text(str(uid)) + if gid is not None: + _args["gidnumber"] = to_text(str(gid)) + if street is not None: + _args["street"] = street + if city is not None: + _args["l"] = city + if userstate is not None: + _args["st"] = userstate + if postalcode is not None: + _args["postalcode"] = postalcode + if phone is not None and len(phone) > 0: + _args["telephonenumber"] = phone + if mobile is not None and len(mobile) > 0: + _args["mobile"] = mobile + if pager is not None and len(pager) > 0: + _args["pager"] = pager + if fax is not None and len(fax) > 0: + _args["facsimiletelephonenumber"] = fax + if orgunit is not None: + _args["ou"] = orgunit + if title is not None: + _args["title"] = title + if carlicense is not None and len(carlicense) > 0: + _args["carlicense"] = carlicense + if sshpubkey is not None and len(sshpubkey) > 0: + _args["ipasshpubkey"] = sshpubkey + if userauthtype is not None and len(userauthtype) > 0: + _args["ipauserauthtype"] = userauthtype + if userclass is not None: + _args["userclass"] = userclass + if radius is not None: + _args["ipatokenradiusconfiglink"] = radius + if radiususer is not None: + _args["ipatokenradiususername"] = radiususer + if departmentnumber is not None: + _args["departmentnumber"] = departmentnumber + if employeenumber is not None: + _args["employeenumber"] = employeenumber + if employeetype is not None: + _args["employeetype"] = employeetype + if preferredlanguage is not None: + _args["preferredlanguage"] = preferredlanguage + if idp is not None: + 
_args["ipaidpconfiglink"] = idp + if idp_user_id is not None: + _args["ipaidpsub"] = idp_user_id + if noprivate is not None: + _args["noprivate"] = noprivate + if nomembers is not None: + _args["no_members"] = nomembers + if smb_logon_script is not None: + _args["ipantlogonscript"] = smb_logon_script + if smb_profile_path is not None: + _args["ipantprofilepath"] = smb_profile_path + if smb_home_dir is not None: + _args["ipanthomedirectory"] = smb_home_dir + if smb_home_drive is not None: + _args["ipanthomedirectorydrive"] = smb_home_drive + return _args + + +def check_parameters( # pylint: disable=unused-argument + module, state, action, first, last, fullname, displayname, initials, + homedir, gecos, shell, email, principal, principalexpiration, + passwordexpiration, password, random, uid, gid, street, city, phone, + mobile, pager, fax, orgunit, title, manager, carlicense, sshpubkey, + userauthtype, userclass, radius, radiususer, departmentnumber, + employeenumber, employeetype, preferredlanguage, certificate, + certmapdata, noprivate, nomembers, preserve, update_password, + smb_logon_script, smb_profile_path, smb_home_dir, smb_home_drive, + idp, ipa_user_id, rename +): + if state == "present" and action == "user": + invalid = ["preserve"] + else: + invalid = [ + "first", "last", "fullname", "displayname", "initials", "homedir", + "shell", "email", "principalexpiration", "passwordexpiration", + "password", "random", "uid", "gid", "street", "city", "phone", + "mobile", "pager", "fax", "orgunit", "title", "carlicense", + "sshpubkey", "userauthtype", "userclass", "radius", "radiususer", + "departmentnumber", "employeenumber", "employeetype", + "preferredlanguage", "noprivate", "nomembers", "update_password", + "gecos", "smb_logon_script", "smb_profile_path", "smb_home_dir", + "smb_home_drive", "idp", "idp_user_id" + ] + + if state == "present" and action == "member": + invalid.append("preserve") + else: + if action == "user": + invalid.extend( + ["principal", 
"manager", "certificate", "certmapdata"]) + + if state != "absent" and preserve is not None: + module.fail_json( + msg="Preserve is only possible for state=absent") + + if state != "renamed": + invalid.append("rename") + else: + invalid.extend([ + "preserve", "principal", "manager", "certificate", "certmapdata", + ]) + if not rename: + module.fail_json( + msg="A value for attribute 'rename' must be provided.") + if action == "member": + module.fail_json( + msg="Action member can not be used with state: renamed.") + + module.params_fail_used_invalid(invalid, state, action) + + if certmapdata is not None: + for x in certmapdata: + certificate = x.get("certificate") + issuer = x.get("issuer") + subject = x.get("subject") + data = x.get("data") + + if data is not None: + if certificate is not None or issuer is not None or \ + subject is not None: + module.fail_json( + msg="certmapdata: data can not be used with " + "certificate, issuer or subject") + check_certmapdata(data) + if certificate is not None \ + and (issuer is not None or subject is not None): + module.fail_json( + msg="certmapdata: certificate can not be used with " + "issuer or subject") + if data is None and certificate is None: + if issuer is None: + module.fail_json(msg="certmapdata: issuer is missing") + if subject is None: + module.fail_json(msg="certmapdata: subject is missing") + + +def check_userauthtype(module, userauthtype): + _invalid = module.ipa_command_invalid_param_choices( + "user_add", "ipauserauthtype", userauthtype) + if _invalid: + module.fail_json( + msg="The use of userauthtype '%s' is not supported " + "by your IPA version" % "','".join(_invalid)) + + +def extend_emails(email, default_email_domain): + if email is not None: + return ["%s@%s" % (_email, default_email_domain) + if "@" not in _email else _email + for _email in email] + return email + + +def convert_certmapdata(certmapdata): + if certmapdata is None: + return None + + _result = [] + for x in certmapdata: + certificate = 
x.get("certificate") + issuer = x.get("issuer") + subject = x.get("subject") + data = x.get("data") + + if data is None: + if issuer is None and subject is None: + cert = load_cert_from_str(certificate) + issuer = cert.issuer + subject = cert.subject + + _result.append("X509:%s%s" % (DN_x500_text(issuer), + DN_x500_text(subject))) + else: + _result.append(data) + + return _result + + +def check_certmapdata(data): + if not data.startswith("X509:"): + return False + + i = data.find("", 4) + s = data.find("", i) # pylint: disable=invalid-name + issuer = data[i + 3:s] + subject = data[s + 3:] + + if i < 0 or s < 0 or "CN" not in issuer or "CN" not in subject: + return False + + return True + + +def gen_certmapdata_args(certmapdata): + return {"ipacertmapdata": to_text(certmapdata)} + + +# pylint: disable=unused-argument +def result_handler(module, result, command, name, args, exit_args, + errors, single_user): + if "random" in args and command in ["user_add", "user_mod"] \ + and "randompassword" in result["result"]: + if single_user: + exit_args["randompassword"] = \ + result["result"]["randompassword"] + else: + exit_args.setdefault(name, {})["randompassword"] = \ + result["result"]["randompassword"] + + IPAAnsibleModule.member_error_handler(module, result, command, name, args, + errors) + + +def main(): + user_spec = dict( + # present + first=dict(type="str", aliases=["givenname"], default=None), + last=dict(type="str", aliases=["sn"], default=None), + fullname=dict(type="str", aliases=["cn"], default=None), + displayname=dict(type="str", default=None), + initials=dict(type="str", default=None), + homedir=dict(type="str", default=None), + gecos=dict(type="str", default=None), + shell=dict(type="str", aliases=["loginshell"], default=None), + email=dict(type="list", elements="str", default=None), + principal=dict(type="list", elements="str", + aliases=["principalname", "krbprincipalname"], + default=None), + principalexpiration=dict(type="str", + 
aliases=["krbprincipalexpiration"], + default=None), + passwordexpiration=dict(type="str", + aliases=["krbpasswordexpiration"], + default=None, no_log=False), + password=dict(type="str", default=None, no_log=True), + random=dict(type='bool', default=None), + uid=dict(type="int", aliases=["uidnumber"], default=None), + gid=dict(type="int", aliases=["gidnumber"], default=None), + street=dict(type="str", default=None), + city=dict(type="str", default=None), + userstate=dict(type="str", aliases=["st"], default=None), + postalcode=dict(type="str", aliases=["zip"], default=None), + phone=dict(type="list", elements="str", aliases=["telephonenumber"], + default=None), + mobile=dict(type="list", elements="str", default=None), + pager=dict(type="list", elements="str", default=None), + fax=dict(type="list", elements="str", + aliases=["facsimiletelephonenumber"], default=None), + orgunit=dict(type="str", aliases=["ou"], default=None), + title=dict(type="str", default=None), + manager=dict(type="list", elements="str", default=None), + carlicense=dict(type="list", elements="str", default=None), + sshpubkey=dict(type="list", elements="str", aliases=["ipasshpubkey"], + default=None), + userauthtype=dict(type='list', elements="str", + aliases=["ipauserauthtype"], default=None, + choices=["password", "radius", "otp", "pkinit", + "hardened", "idp", "passkey", ""]), + userclass=dict(type="list", elements="str", aliases=["class"], + default=None), + radius=dict(type="str", aliases=["ipatokenradiusconfiglink"], + default=None), + radiususer=dict(type="str", aliases=["radiususername", + "ipatokenradiususername"], + default=None), + departmentnumber=dict(type="list", elements="str", default=None), + employeenumber=dict(type="str", default=None), + employeetype=dict(type="str", default=None), + smb_logon_script=dict(type="str", default=None, + aliases=["ipantlogonscript"]), + smb_profile_path=dict(type="str", default=None, + aliases=["ipantprofilepath"]), + smb_home_dir=dict(type="str", 
default=None, + aliases=["ipanthomedirectory"]), + smb_home_drive=dict(type="str", default=None, + choices=[ + ("%c:" % chr(x)) + for x in range(ord('A'), ord('Z') + 1) + ] + [""], aliases=["ipanthomedirectorydrive"]), + preferredlanguage=dict(type="str", default=None), + certificate=dict(type="list", elements="str", + aliases=["usercertificate"], default=None), + certmapdata=dict(type="list", default=None, + options=dict( + # Here certificate is a simple string + certificate=dict(type="str", default=None), + issuer=dict(type="str", default=None), + subject=dict(type="str", default=None), + data=dict(type="str", default=None) + ), + elements='dict', required=False), + noprivate=dict(type='bool', default=None), + nomembers=dict(type='bool', default=None), + idp=dict(type="str", default=None, aliases=['ipaidpconfiglink']), + idp_user_id=dict(type="str", default=None, + aliases=['ipaidpsub']), + rename=dict(type="str", required=False, default=None, + aliases=["new_name"]), + ) + + ansible_module = IPAAnsibleModule( + argument_spec=dict( + # general + name=dict(type="list", elements="str", aliases=["login"], + default=None, required=False), + users=dict(type="list", + default=None, + options=dict( + # Here name is a simple string + name=dict(type="str", required=True, + aliases=["login"]), + # Add user specific parameters + **user_spec + ), + elements='dict', + required=False), + + # deleted + preserve=dict(required=False, type='bool', default=None), + + # mod + update_password=dict(type='str', default=None, no_log=False, + choices=['always', 'on_create']), + + # general + action=dict(type="str", default="user", + choices=["member", "user"]), + state=dict(type="str", default="present", + choices=["present", "absent", "enabled", "disabled", + "unlocked", "undeleted", "renamed"]), + + # Add user specific parameters for simple use case + **user_spec + ), + mutually_exclusive=[["name", "users"]], + required_one_of=[["name", "users"]], + supports_check_mode=True, + ) + + 
ansible_module._ansible_debug = True + + # Get parameters + + # general + names = ansible_module.params_get("name") + users = ansible_module.params_get("users") + + # present + first = ansible_module.params_get("first") + last = ansible_module.params_get("last") + fullname = ansible_module.params_get("fullname") + displayname = ansible_module.params_get("displayname") + initials = ansible_module.params_get("initials") + homedir = ansible_module.params_get("homedir") + gecos = ansible_module.params_get("gecos") + shell = ansible_module.params_get("shell") + email = ansible_module.params_get("email") + principal = ansible_module.params_get("principal") + principalexpiration = ansible_module.params_get( + "principalexpiration") + if principalexpiration is not None: + if principalexpiration[:-1] != "Z": + principalexpiration = principalexpiration + "Z" + principalexpiration = convert_date(principalexpiration) + passwordexpiration = ansible_module.params_get("passwordexpiration") + if passwordexpiration is not None: + if passwordexpiration[:-1] != "Z": + passwordexpiration = passwordexpiration + "Z" + passwordexpiration = convert_date(passwordexpiration) + password = ansible_module.params_get("password") + random = ansible_module.params_get("random") + uid = ansible_module.params_get("uid") + gid = ansible_module.params_get("gid") + street = ansible_module.params_get("street") + city = ansible_module.params_get("city") + userstate = ansible_module.params_get("userstate") + postalcode = ansible_module.params_get("postalcode") + phone = ansible_module.params_get("phone") + mobile = ansible_module.params_get("mobile") + pager = ansible_module.params_get("pager") + fax = ansible_module.params_get("fax") + orgunit = ansible_module.params_get("orgunit") + title = ansible_module.params_get("title") + manager = ansible_module.params_get("manager") + carlicense = ansible_module.params_get("carlicense") + sshpubkey = ansible_module.params_get("sshpubkey", + 
allow_empty_list_item=True) + userauthtype = ansible_module.params_get("userauthtype", + allow_empty_list_item=True) + userclass = ansible_module.params_get("userclass") + radius = ansible_module.params_get("radius") + radiususer = ansible_module.params_get("radiususer") + departmentnumber = ansible_module.params_get("departmentnumber") + employeenumber = ansible_module.params_get("employeenumber") + employeetype = ansible_module.params_get("employeetype") + preferredlanguage = ansible_module.params_get("preferredlanguage") + smb_logon_script = ansible_module.params_get("smb_logon_script") + smb_profile_path = ansible_module.params_get("smb_profile_path") + smb_home_dir = ansible_module.params_get("smb_home_dir") + smb_home_drive = ansible_module.params_get("smb_home_drive") + idp = ansible_module.params_get("idp") + idp_user_id = ansible_module.params_get("idp_user_id") + certificate = ansible_module.params_get("certificate") + certmapdata = ansible_module.params_get("certmapdata") + noprivate = ansible_module.params_get("noprivate") + nomembers = ansible_module.params_get("nomembers") + # deleted + preserve = ansible_module.params_get("preserve") + # mod + update_password = ansible_module.params_get("update_password") + # rename + rename = ansible_module.params_get("rename") + # general + action = ansible_module.params_get("action") + state = ansible_module.params_get("state") + + # Check parameters + + if (names is None or len(names) < 1) and \ + (users is None or len(users) < 1): + ansible_module.fail_json(msg="One of name and users is required") + + if state in ["present", "renamed"]: + if names is not None and len(names) != 1: + act = "renamed" if state == "renamed" else "added" + ansible_module.fail_json( + msg="Only one user can be %s at a time using name." 
% (act)) + + # Use users if names is None + if users is not None: + names = users + else: + check_parameters( + ansible_module, state, action, + first, last, fullname, displayname, initials, homedir, gecos, + shell, email, + principal, principalexpiration, passwordexpiration, password, + random, + uid, gid, street, city, phone, mobile, pager, fax, orgunit, title, + manager, carlicense, sshpubkey, userauthtype, userclass, radius, + radiususer, departmentnumber, employeenumber, employeetype, + preferredlanguage, certificate, certmapdata, noprivate, nomembers, + preserve, update_password, smb_logon_script, smb_profile_path, + smb_home_dir, smb_home_drive, idp, idp_user_id, rename, + ) + certificate = convert_input_certificates(ansible_module, certificate, + state) + certmapdata = convert_certmapdata(certmapdata) + + # Init + + changed = False + exit_args = {} + diff_tracker = IPADiffTracker() + + # Connect to IPA API + with ansible_module.ipa_connect(): + + # Check version specific settings + + server_realm = ansible_module.ipa_get_realm() + + # Check API specific parameters + + check_userauthtype(ansible_module, userauthtype) + + # Default email domain + + result = ansible_module.ipa_command_no_name("config_show", {}) + default_email_domain = result["result"]["ipadefaultemaildomain"][0] + + # Extend email addresses + + email = extend_emails(email, default_email_domain) + + # commands + + commands = [] + user_set = set() + + for user in names: + if isinstance(user, dict): + name = user.get("name") + if name in user_set: + ansible_module.fail_json( + msg="user '%s' is used more than once" % name) + user_set.add(name) + # present + first = user.get("first") + last = user.get("last") + fullname = user.get("fullname") + displayname = user.get("displayname") + initials = user.get("initials") + homedir = user.get("homedir") + gecos = user.get("gecos") + shell = user.get("shell") + email = user.get("email") + principal = user.get("principal") + principalexpiration = 
user.get("principalexpiration") + if principalexpiration is not None: + if principalexpiration[:-1] != "Z": + principalexpiration = principalexpiration + "Z" + principalexpiration = convert_date(principalexpiration) + passwordexpiration = user.get("passwordexpiration") + if passwordexpiration is not None: + if passwordexpiration[:-1] != "Z": + passwordexpiration = passwordexpiration + "Z" + passwordexpiration = convert_date(passwordexpiration) + password = user.get("password") + random = user.get("random") + uid = user.get("uid") + gid = user.get("gid") + street = user.get("street") + city = user.get("city") + userstate = user.get("userstate") + postalcode = user.get("postalcode") + phone = user.get("phone") + mobile = user.get("mobile") + pager = user.get("pager") + fax = user.get("fax") + orgunit = user.get("orgunit") + title = user.get("title") + manager = user.get("manager") + carlicense = user.get("carlicense") + sshpubkey = user.get("sshpubkey") + userauthtype = user.get("userauthtype") + userclass = user.get("userclass") + radius = user.get("radius") + radiususer = user.get("radiususer") + departmentnumber = user.get("departmentnumber") + employeenumber = user.get("employeenumber") + employeetype = user.get("employeetype") + preferredlanguage = user.get("preferredlanguage") + smb_logon_script = user.get("smb_logon_script") + smb_profile_path = user.get("smb_profile_path") + smb_home_dir = user.get("smb_home_dir") + smb_home_drive = user.get("smb_home_drive") + idp = user.get("idp") + idp_user_id = user.get("idp_user_id") + rename = user.get("rename") + certificate = user.get("certificate") + certmapdata = user.get("certmapdata") + noprivate = user.get("noprivate") + nomembers = user.get("nomembers") + + check_parameters( + ansible_module, state, action, + first, last, fullname, displayname, initials, homedir, + gecos, shell, email, principal, principalexpiration, + passwordexpiration, password, random, uid, gid, street, + city, phone, mobile, pager, fax, 
orgunit, title, manager, + carlicense, sshpubkey, userauthtype, userclass, radius, + radiususer, departmentnumber, employeenumber, + employeetype, preferredlanguage, certificate, + certmapdata, noprivate, nomembers, preserve, + update_password, smb_logon_script, smb_profile_path, + smb_home_dir, smb_home_drive, idp, idp_user_id, rename, + ) + certificate = convert_input_certificates(ansible_module, + certificate, state) + certmapdata = convert_certmapdata(certmapdata) + + # Check API specific parameters + + check_userauthtype(ansible_module, userauthtype) + + # Extend email addresses + + email = extend_emails(email, default_email_domain) + + elif ( + isinstance( + user, (str, unicode) # pylint: disable=W0012,E0606 + ) + ): + name = user + else: + ansible_module.fail_json(msg="User '%s' is not valid" % + repr(user)) + + # Fix principals: add realm if missing + # We need the connected API for the realm, therefore it can not + # be part of check_parameters as this is used also before the + # connection to the API has been established. + if principal is not None: + principal = [x if "@" in x else x + "@" + server_realm + for x in principal] + + # Check passwordexpiration availability. + # We need the connected API for this test, therefore it can not + # be part of check_parameters as this is used also before the + # connection to the API has been established. + if passwordexpiration is not None and \ + not ansible_module.ipa_command_param_exists( + "user_add", "krbpasswordexpiration"): + ansible_module.fail_json( + msg="The use of passwordexpiration is not supported by " + "your IPA version") + + # Check certmapdata availability. + # We need the connected API for this test, therefore it can not + # be part of check_parameters as this is used also before the + # connection to the API has been established. 
+ if certmapdata is not None and \ + not ansible_module.ipa_command_exists("user_add_certmapdata"): + ansible_module.fail_json( + msg="The use of certmapdata is not supported by " + "your IPA version") + + # Check if SMB attributes are available + if ( + any([ + smb_logon_script, smb_profile_path, smb_home_dir, + smb_home_drive + ]) + and not ansible_module.ipa_command_param_exists( + "user_mod", "ipanthomedirectory" + ) + ): + ansible_module.fail_json( + msg="The use of smb_logon_script, smb_profile_path, " + "smb_profile_path, and smb_home_drive is not supported " + "by your IPA version") + + # Check if IdP support is available + require_idp = ( + idp is not None + or idp_user_id is not None + or userauthtype == "idp" + ) + has_idp_support = ansible_module.ipa_command_param_exists( + "user_add", "ipaidpconfiglink" + ) + if require_idp and not has_idp_support: + ansible_module.fail_json( + msg="Your IPA version does not support External IdP.") + + # Make sure user exists + res_find = find_user(ansible_module, name) + res_find_orig = res_find + + # Create command + if state == "present": + # Generate args + args = gen_args( + first, last, fullname, displayname, initials, homedir, + gecos, + shell, email, principalexpiration, passwordexpiration, + password, random, uid, gid, street, city, userstate, + postalcode, phone, mobile, pager, fax, orgunit, title, + carlicense, sshpubkey, userauthtype, userclass, radius, + radiususer, departmentnumber, employeenumber, employeetype, + preferredlanguage, smb_logon_script, smb_profile_path, + smb_home_dir, smb_home_drive, idp, idp_user_id, noprivate, + nomembers, + ) + attr_before, attr_after = {}, {} + + if action == "user": + # Found the user + if res_find is not None: + # Ignore password and random with + # update_password == on_create + if update_password == "on_create": + if "userpassword" in args: + del args["userpassword"] + if "random" in args: + del args["random"] + # if using "random:false" password should not be + # 
generated. + if not args.get("random", True): + del args["random"] + if "noprivate" in args: + del args["noprivate"] + + # For all settings is args, check if there are + # different settings in the find result. + # If yes: modify + # The nomembers parameter is added to args for the + # api command. But no_members is never part of + # res_find from user-show, therefore this parameter + # needs to be ignored in compare_args_ipa. + if not compare_args_ipa( + ansible_module, args, res_find, + ignore=["no_members"]): + commands.append([name, "user_mod", args]) + attr_before, attr_after = gen_args_diff( + args, res_find, + ignore=["no_members"]) + # Mask password in diff + if "userpassword" in attr_before: + attr_before["userpassword"] = "********" + if "userpassword" in attr_after: + attr_after["userpassword"] = "********" + + else: + # Make sure we have a first and last name + if first is None: + ansible_module.fail_json( + msg="First name is needed") + if last is None: + ansible_module.fail_json( + msg="Last name is needed") + + smb_attrs = { + k: args[k] + for k in [ + "ipanthomedirectory", + "ipanthomedirectorydrive", + "ipantlogonscript", + "ipantprofilepath", + ] + if k in args + } + for key in smb_attrs.keys(): + del args[key] + commands.append([name, "user_add", args]) + _diff_args = dict(args) + if "userpassword" in _diff_args: + _diff_args["userpassword"] = "********" + attr_before, attr_after = {}, _diff_args + if smb_attrs: + commands.append([name, "user_mod", smb_attrs]) + # Handle members: principal, manager, certificate and + # certmapdata + if res_find is not None: + # Generate addition and removal lists + manager_add, manager_del = gen_add_del_lists( + manager, res_find.get("manager")) + + principal_add, principal_del = gen_add_del_lists( + principal, res_find.get("krbprincipalname")) + # Principals are not returned as utf8 for IPA using + # python2 using user_find, therefore we need to + # convert the principals that we should remove. 
+ principal_del = [to_text(x) for x in principal_del] + + certificate_add, certificate_del = gen_add_del_lists( + certificate, res_find.get("usercertificate")) + + certmapdata_add, certmapdata_del = gen_add_del_lists( + certmapdata, res_find.get("ipacertmapdata")) + + else: + # Use given managers and principals + manager_add = manager or [] + manager_del = [] + principal_add = principal or [] + principal_del = [] + certificate_add = certificate or [] + certificate_del = [] + certmapdata_add = certmapdata or [] + certmapdata_del = [] + + # Remove canonical principal from principal_del + canonical_principal = name + "@" + server_realm + if canonical_principal in principal_del: + principal_del.remove(canonical_principal) + + # Add managers + if len(manager_add) > 0: + commands.append([name, "user_add_manager", + { + "user": manager_add, + }]) + # Remove managers + if len(manager_del) > 0: + commands.append([name, "user_remove_manager", + { + "user": manager_del, + }]) + + # Principals need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. + + # Add principals + if len(principal_add) > 0: + for _principal in principal_add: + commands.append([name, "user_add_principal", + { + "krbprincipalname": + _principal, + }]) + # Remove principals + if len(principal_del) > 0: + for _principal in principal_del: + commands.append([name, "user_remove_principal", + { + "krbprincipalname": + _principal, + }]) + + # Certificates need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. 
+ + # Add certificates + if len(certificate_add) > 0: + for _certificate in certificate_add: + commands.append([name, "user_add_cert", + { + "usercertificate": + _certificate, + }]) + # Remove certificates + if len(certificate_del) > 0: + for _certificate in certificate_del: + commands.append([name, "user_remove_cert", + { + "usercertificate": + _certificate, + }]) + + # certmapdata need to be added and removed one by one, + # because issuer and subject can only be done one by + # one reliably (https://pagure.io/freeipa/issue/8097) + + # Add certmapdata + if len(certmapdata_add) > 0: + for _data in certmapdata_add: + commands.append([name, "user_add_certmapdata", + gen_certmapdata_args(_data)]) + # Remove certmapdata + if len(certmapdata_del) > 0: + for _data in certmapdata_del: + commands.append([name, "user_remove_certmapdata", + gen_certmapdata_args(_data)]) + + # Diff tracking + before, after = merge_diffs( + (attr_before, attr_after), + gen_member_diff( + "manager", manager_add, manager_del, + (res_find_orig or {}).get("manager")), + gen_member_diff( + "principal", principal_add, principal_del, + (res_find_orig or {}).get("krbprincipalname")), + gen_member_diff( + "certificate", certificate_add, certificate_del, + (res_find_orig or {}).get("usercertificate")), + ) + diff_tracker.add_entry_diff(name, before, after) + + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No user '%s'" % name) + + # Ensure managers are present + manager_add = gen_add_list( + manager, res_find.get("manager")) + if manager_add is not None and len(manager_add) > 0: + commands.append([name, "user_add_manager", + { + "user": manager_add, + }]) + + # Principals need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. 
+ + # Ensure principals are present + principal_add = gen_add_list( + principal, res_find.get("krbprincipalname")) + if principal_add is not None and len(principal_add) > 0: + for _principal in principal_add: + commands.append([name, "user_add_principal", + { + "krbprincipalname": + _principal, + }]) + + # Certificates need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. + + # Ensure certificates are present + certificate_add = gen_add_list( + certificate, res_find.get("usercertificate")) + if certificate_add is not None and \ + len(certificate_add) > 0: + for _certificate in certificate_add: + commands.append([name, "user_add_cert", + { + "usercertificate": + _certificate, + }]) + + # certmapdata need to be added and removed one by one, + # because issuer and subject can only be done one by + # one reliably (https://pagure.io/freeipa/issue/8097) + + # Ensure certmapdata are present + certmapdata_add = gen_add_list( + certmapdata, res_find.get("ipacertmapdata")) + if certmapdata_add is not None and \ + len(certmapdata_add) > 0: + for _data in certmapdata_add: + commands.append([name, "user_add_certmapdata", + gen_certmapdata_args(_data)]) + + # Diff tracking + before, after = merge_diffs( + gen_member_diff( + "manager", manager_add, [], + (res_find or {}).get("manager")), + gen_member_diff( + "principal", principal_add, [], + (res_find or {}).get("krbprincipalname")), + gen_member_diff( + "certificate", certificate_add, [], + (res_find or {}).get("usercertificate")), + ) + diff_tracker.add_entry_diff(name, before, after) + + elif state == "absent": + if action == "user": + if res_find is not None: + args = {} + if preserve is not None: + args["preserve"] = preserve + if ( + not res_find.get("preserved", False) + or not args.get("preserve", False) + ): + commands.append([name, "user_del", args]) + diff_tracker.add_entry_diff( + 
name, + {"state": "present"}, {"state": "absent"}) + elif action == "member": + if res_find is None: + ansible_module.fail_json( + msg="No user '%s'" % name) + + # Ensure managers are absent + manager_del = gen_intersection_list( + manager, res_find.get("manager")) + if manager_del is not None and len(manager_del) > 0: + commands.append([name, "user_remove_manager", + { + "user": manager_del, + }]) + + # Principals need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. + + # Ensure principals are absent + principal_del = gen_intersection_list( + principal, res_find.get("krbprincipalname")) + if principal_del is not None and len(principal_del) > 0: + commands.append([name, "user_remove_principal", + { + "krbprincipalname": principal_del, + }]) + + # Certificates need to be added and removed one by one, + # because if entry already exists, the processing of + # the remaining enries is stopped. The same applies to + # the removal of non-existing entries. 
+ + # Ensure certificates are absent + certificate_del = gen_intersection_list( + certificate, res_find.get("usercertificate")) + if certificate_del is not None and \ + len(certificate_del) > 0: + for _certificate in certificate_del: + commands.append([name, "user_remove_cert", + { + "usercertificate": + _certificate, + }]) + + # certmapdata need to be added and removed one by one, + # because issuer and subject can only be done one by + # one reliably (https://pagure.io/freeipa/issue/8097) + + # Ensure certmapdata are absent + certmapdata_del = gen_intersection_list( + certmapdata, res_find.get("ipacertmapdata")) + if certmapdata_del is not None and \ + len(certmapdata_del) > 0: + # Using issuer and subject can only be done one by + # one reliably (https://pagure.io/freeipa/issue/8097) + for _data in certmapdata_del: + commands.append([name, "user_remove_certmapdata", + gen_certmapdata_args(_data)]) + + # Diff tracking + before, after = merge_diffs( + gen_member_diff( + "manager", [], manager_del or [], + (res_find or {}).get("manager")), + gen_member_diff( + "principal", [], principal_del or [], + (res_find or {}).get("krbprincipalname")), + gen_member_diff( + "certificate", [], certificate_del or [], + (res_find or {}).get("usercertificate")), + ) + diff_tracker.add_entry_diff(name, before, after) + + elif state == "undeleted": + if res_find is not None: + if res_find.get("preserved", False): + commands.append([name, "user_undel", {}]) + else: + raise ValueError("No user '%s'" % name) + + elif state == "enabled": + if res_find is not None: + if res_find["nsaccountlock"]: + commands.append([name, "user_enable", {}]) + diff_tracker.add_entry_diff( + name, {"enabled": False}, {"enabled": True}) + else: + raise ValueError("No user '%s'" % name) + + elif state == "disabled": + if res_find is not None: + if not res_find["nsaccountlock"]: + commands.append([name, "user_disable", {}]) + diff_tracker.add_entry_diff( + name, {"enabled": True}, {"enabled": False}) + else: 
+ raise ValueError("No user '%s'" % name) + + elif state == "unlocked": + if res_find is not None: + commands.append([name, "user_unlock", {}]) + else: + raise ValueError("No user '%s'" % name) + + elif state == "renamed": + if res_find is None: + ansible_module.fail_json(msg="No user '%s'" % name) + else: + if rename != name: + commands.append([name, 'user_mod', {"rename": rename}]) + diff_tracker.add_entry_diff( + name, {"uid": name}, {"uid": rename}) + else: + ansible_module.fail_json(msg="Unkown state '%s'" % state) + + del user_set + + # Execute commands + + changed = ansible_module.execute_ipa_commands( + commands, result_handler, batch=True, keeponly=["randompassword"], + exit_args=exit_args, single_user=users is None) + + # Done + _exit_kwargs = diff_tracker.build_diff() + ansible_module.exit_json(changed=changed, user=exit_args, **_exit_kwargs) + + +if __name__ == "__main__": + main() diff --git a/roles/acme_sh/README.md b/roles/acme_sh/README.md index 1db0e5fcd..776164aaf 100644 --- a/roles/acme_sh/README.md +++ b/roles/acme_sh/README.md @@ -10,6 +10,7 @@ SSLCertificateKeyFile /etc/pki/tls/private/www.example.com.key SSLCertificateChainFile /etc/pki/tls/certs/www.example.com-chain.crt ``` + ## Mandatory Requirements * Install `openssl`. This can be done using the [linuxfabrik.lfops.apps](https://github.com/Linuxfabrik/lfops/tree/main/roles/apps) role. @@ -30,19 +31,53 @@ If you use the [acme.sh Playbook](https://github.com/Linuxfabrik/lfops/blob/main ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `acme_sh` | Installs acme.sh and issues certificates | - | -| `acme_sh:certificates` | Issues certificates | - | -| `acme_sh:state` | Manages the state of the weekly acme.sh timer | - | +`acme_sh` + +* Installs acme.sh and issues certificates. +* Triggers: none. + +`acme_sh:certificates` + +* Issues certificates. +* Triggers: none. + +`acme_sh:state` + +* Manages the state of the weekly acme.sh timer. 
+* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `acme_sh__account_email` | Email address for the Let's encrypt account. This address will receive expiry emails. | -| `acme_sh__certificates` | List of certificates that should be issued. Subkeys:
  • `name`: Mandatory, string. Domain of the certificate.
  • `alternative_names`: Optional, list. Subject Alternative Names (SAN) for the certificate. Defaults to unset.
  • `reload_cmd`: Optional, string. Command to execute after issue/renew to reload the server. Defaults to `systemctl reload httpd`.
| +`acme_sh__account_email` + +* Email address for the Let's encrypt account. This address will receive expiry emails. +* Type: String. +* Default: none + +`acme_sh__certificates` + +* List of certificates that should be issued. +* Type: List of dictionaries. +* Default: none +* Subkeys: + + * `name`: + + * Mandatory. Domain of the certificate. + * Type: String. + + * `alternative_names`: + + * Optional. Subject Alternative Names (SAN) for the certificate. + * Type: List. + * Default: unset + + * `reload_cmd`: + + * Optional. Command to execute after issue/renew to reload the server. + * Type: String. + * Default: `'systemctl reload httpd'` Example: ```yaml @@ -59,15 +94,47 @@ acme_sh__certificates: ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `acme_sh__deploy_to_host` | The host which the issued certificates should be deployed to. | unset | -| `acme_sh__deploy_to_host_hook` | The deployment hook which should be used to deploy the certificates to the deploy host. | `ssh` | -| `acme_sh__deploy_to_host_reload_cmd` | The reload command which should be executed on the deploy host after the certificates were deployed to the deploy host. | `reload_cmd` subkey of the `acme_sh__certificates` item, or `systemctl reload httpd` | -| `acme_sh__deploy_to_host_user` | The remote user account which should be used to deploy the certificates to the deploy host. | `root` | -| `acme_sh__key_length` | Key length in bits of the certificates to issue. | `4096` | -| `acme_sh__reload_cmd` | The reload command which should be executed on the local host after the certificates were installed. | `reload_cmd` subkey of the `acme_sh__certificates` item, or `systemctl reload httpd` | -| `acme_sh__timer_enabled` | Enables or disables the weekly acme.sh timer, analogous to `systemctl enable/disable --now`. | `true` | +`acme_sh__deploy_to_host` + +* The host which the issued certificates should be deployed to. +* Type: String. 
+* Default: unset + +`acme_sh__deploy_to_host_hook` + +* The deployment hook which should be used to deploy the certificates to the deploy host. +* Type: String. +* Default: `'ssh'` + +`acme_sh__deploy_to_host_reload_cmd` + +* The reload command which should be executed on the deploy host after the certificates were deployed to the deploy host. +* Type: String. +* Default: `reload_cmd` subkey of the `acme_sh__certificates` item, or `'systemctl reload httpd'` + +`acme_sh__deploy_to_host_user` + +* The remote user account which should be used to deploy the certificates to the deploy host. +* Type: String. +* Default: `'root'` + +`acme_sh__key_length` + +* Key length in bits of the certificates to issue. +* Type: Number. +* Default: `4096` + +`acme_sh__reload_cmd` + +* The reload command which should be executed on the local host after the certificates were installed. +* Type: String. +* Default: `reload_cmd` subkey of the `acme_sh__certificates` item, or `'systemctl reload httpd'` + +`acme_sh__timer_enabled` + +* Enables or disables the weekly acme.sh timer, analogous to `systemctl enable/disable --now`. +* Type: Bool. 
+* Default: `true` Example: ```yaml diff --git a/roles/acme_sh/tasks/main.yml b/roles/acme_sh/tasks/main.yml index d9828160f..a8355c65e 100644 --- a/roles/acme_sh/tasks/main.yml +++ b/roles/acme_sh/tasks/main.yml @@ -29,6 +29,7 @@ - name: 'Deploy /etc/systemd/system/acme-sh.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/acme-sh.service.j2' dest: '/etc/systemd/system/acme-sh.service' owner: 'root' @@ -37,6 +38,7 @@ - name: 'Deploy /etc/systemd/system/acme-sh.timer' ansible.builtin.template: + backup: true src: 'etc/systemd/system/acme-sh.timer.j2' dest: '/etc/systemd/system/acme-sh.timer' owner: 'root' diff --git a/roles/alternatives/README.md b/roles/alternatives/README.md index b7b38cbbb..9c3854015 100644 --- a/roles/alternatives/README.md +++ b/roles/alternatives/README.md @@ -11,16 +11,46 @@ Hints: ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `alternatives` | Manages alternative programs for common commands. | - | +`alternatives` + +* Manages alternative programs for common commands. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `alternatives__alternatives` | List of alternatives to remove or to deploy. Subkeys:
  • `link`: String. The path to the symbolic link that should point to the real executable. This option is always required on RHEL-based distributions. On Debian-based distributions this option is required when the alternative `name` is unknown to the system.
  • `name`: Mandatory, string. The generic name of the link.
  • `path`: Optional, string. The path to the real executable that the link should point to.
  • `priority`: Optional, number. The priority of the alternative. If no priority is given for creation `50` is used as a fallback.
  • `state`: Optional, string. One of:
    • `selected`: Default. Install the alternative (if not already installed), and set it as the currently selected alternative for the group.
    • `present`: Install the alternative (if not already installed), but do not set it as the currently selected alternative for the group.
    • `auto`: Install the alternative (if not already installed), and set the group to auto mode.
    • `absent`: Remove the alternative.
| `[]` | +`alternatives__alternatives` + +* List of alternatives to remove or to deploy. +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `link`: + + * The path to the symbolic link that should point to the real executable. This option is always required on RHEL-based distributions. On Debian-based distributions this option is required when the alternative `name` is unknown to the system. + * Type: String. + + * `name`: + + * Mandatory. The generic name of the link. + * Type: String. + + * `path`: + + * Optional. The path to the real executable that the link should point to. + * Type: String. + + * `priority`: + + * Optional. The priority of the alternative. If no priority is given for creation `50` is used as a fallback. + * Type: Number. + + * `state`: + + * Optional. One of: `selected` (default; install the alternative and set it as the currently selected alternative for the group), `present` (install the alternative but do not set it as the currently selected alternative for the group), `auto` (install the alternative and set the group to auto mode), `absent` (remove the alternative). + * Type: String. + * Default: `'selected'` Example: ```yaml diff --git a/roles/ansible_init/README.md b/roles/ansible_init/README.md index 2331ef1a3..72d1b343a 100644 --- a/roles/ansible_init/README.md +++ b/roles/ansible_init/README.md @@ -14,20 +14,36 @@ This role: ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `ansible_init` | * Update ansible_init (ansinv) itself
* Update ../lfops
* Load repo list
* Clone the inventories
* Install ansible collections | - | -| `ansible_init:collections` | Install ansible collections | - | -| `ansible_init:command` | Load repo list | - | +`ansible_init` + +* Update ansible_init (ansinv) itself. +* Update ../lfops. +* Load repo list. +* Clone the inventories. +* Install ansible collections. +* Triggers: none. + +`ansible_init:collections` + +* Install ansible collections. +* Triggers: none. + +`ansible_init:command` + +* Load repo list. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `ansible_init__url` | URL of the ansinv repo. | +`ansible_init__url` + +* URL of the ansinv repo. +* Type: String. +* Default: none Example: + ```yaml # mandatory ansible_init__url: 'git@example.com:my-ansinv.git' @@ -36,15 +52,87 @@ ansible_init__url: 'git@example.com:my-ansinv.git' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `ansible_init__ansible_collections` | List of dictionaries of Ansible collections to install. Subkeys:
* `name`: Mandatory, string. Name of the collection.
* `type`. Optional. Defaults to `collection`. One of `collection`, `role`, or `both`. | `[]` | -| `ansible_init__inventories` | List of dictionaries of inventories to clone. Subkeys:
* `name`: Mandatory, string. Name of the inventory. Will be used as the folder name.
* `url`: Mandatory, string. Git, SSH, or HTTP(S) protocol address of the repository.
* `version`: Optional, string. Defaults to `'main'`. Git version to checkout. | `[]` | -| `ansible_init__lfops_url` | URL of the LFOps repo. Either `'git@github.com:Linuxfabrik/lfops.git'` for development purposes, or `'https://github.com/Linuxfabrik/lfops.git'` for general access. | `'https://github.com/Linuxfabrik/lfops.git'` | -| `ansible_init__roles` | List of dictionaries of roles to clone. Subkeys:
* `name`: Mandatory, string. Name of the role. Will be used as the folder name.
* `url`: Mandatory, string. Git, SSH, or HTTP(S) protocol address of the repository.
* `version`: Optional, string. Defaults to `'main'`. Git version to checkout. | `[]` | -| `ansible_init__version` | Git version of the ansinv repo to checkout. | `'main'` | +`ansible_init__ansible_collections` + +* List of dictionaries of Ansible collections to install. +* Subkeys: + + * `name`: + + * Mandatory. Name of the collection. + * Type: String. + + * `type`: + + * Optional. One of `collection`, `role`, or `both`. + * Type: String. + * Default: `'collection'` + +* Type: List of dictionaries. +* Default: `[]` + +`ansible_init__inventories` + +* List of dictionaries of inventories to clone. +* Subkeys: + + * `name`: + + * Mandatory. Name of the inventory. Will be used as the folder name. + * Type: String. + + * `url`: + + * Mandatory. Git, SSH, or HTTP(S) protocol address of the repository. + * Type: String. + + * `version`: + + * Optional. Git version to checkout. + * Type: String. + * Default: `'main'` + +* Type: List of dictionaries. +* Default: `[]` + +`ansible_init__lfops_url` + +* URL of the LFOps repo. Either `'git@github.com:Linuxfabrik/lfops.git'` for development purposes, or `'https://github.com/Linuxfabrik/lfops.git'` for general access. +* Type: String. +* Default: `'https://github.com/Linuxfabrik/lfops.git'` + +`ansible_init__roles` + +* List of dictionaries of roles to clone. +* Subkeys: + + * `name`: + + * Mandatory. Name of the role. Will be used as the folder name. + * Type: String. + + * `url`: + + * Mandatory. Git, SSH, or HTTP(S) protocol address of the repository. + * Type: String. + + * `version`: + + * Optional. Git version to checkout. + * Type: String. + * Default: `'main'` + +* Type: List of dictionaries. +* Default: `[]` + +`ansible_init__version` + +* Git version of the ansinv repo to checkout. +* Type: String. 
+* Default: `'main'` Example: + ```yaml # optional ansible_init__ansible_collections: diff --git a/roles/apache_httpd/README.md b/roles/apache_httpd/README.md index a31c139ab..05b57b80b 100644 --- a/roles/apache_httpd/README.md +++ b/roles/apache_httpd/README.md @@ -5,7 +5,7 @@ This role installs and configures a CIS-compliant [Apache httpd](https://httpd.a ## What this Role does -This role configures Apache in the same way as is usual on Debian systems, so quite different to upstream's suggested way to configure the web server. This is because this role attempts to make adding and removing mods, virtual hosts, and extra configuration directives as flexible as possible, in order to make automating the changes and administering the server as easy as possible. +This role configures Apache using a Debian-style layout with `conf-available/conf-enabled`, `mods-available/mods-enabled`, and `sites-available/sites-enabled` directories. On Red Hat-based systems, this means a significant restructuring of the default Apache configuration. The goal is to make adding and removing mods, virtual hosts, and extra configuration directives as flexible as possible, regardless of the underlying platform. The config is split into several files forming the configuration hierarchy outlined below, all located in the `/etc/httpd/` directory: @@ -20,7 +20,7 @@ The config is split into several files forming the configuration hierarchy outli `-- sites-enabled/ ``` -We try to avoid using `<IfModule>` in the global Apache configuration as well as in the configuration of the vhosts as much as possible in order to facilitate debugging. Otherwise, when using `<IfModule>`, configuration options are silently dropped, and their absence is very difficult to notice. +We avoid using `<IfModule>` in vHost definitions and in the global `httpd.conf` to facilitate debugging. Without `<IfModule>`, a missing module causes a clear startup error instead of silently dropping configuration.
`<IfModule>` is only used in `mods-available/` and `conf-available/` where it is necessary to guard module-specific configuration. For flexibility, use the `raw` variable to configure the following topics (have a look at the "Apache vHost Configs" section for some examples): @@ -41,9 +41,21 @@ If you want to check Apache with [our STIG audit script](https://github.com/Linu * SELinux: Use specialized roles to set specific SELinux Booleans, Policies etc. +## Platform-Specific Behavior + +This role supports both Red Hat and Debian-based systems. Paths and service names differ between platforms: + +* Config path: Red Hat `/etc/httpd`, Debian `/etc/apache2` +* Service name: Red Hat `httpd`, Debian `apache2` +* User/Group: Red Hat `apache`/`apache`, Debian `www-data`/`www-data` +* PHP-FPM socket: Red Hat `/run/php-fpm/www.sock`, Debian `/run/php/www.sock` + +These differences are handled automatically. All documentation below uses Red Hat paths. + + ## Config Examples for vHosts -[Have a look here](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/EXAMPLES.md). +See [EXAMPLES.md](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/EXAMPLES.md). @@ -61,28 +73,87 @@ If you want to check Apache with [our STIG audit script](https://github.com/Linu ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `apache_httpd` | * Installs and configures apache_httpd | Reloads httpd.service | -| `apache_httpd:config` | * Creates or updates global Apache configuration
* Removes conf-available configs
* Creates conf-available configs
* Disables configs
* Enables configs | Reloads httpd.service | -| `apache_httpd:htpasswd` | Manages htpasswd files. | - | -| `apache_httpd:matomo` | * Deploys Matomo Log Analytics Python Script | - | -| `apache_httpd:mod_security_coreruleset` | * Downloads, verifies and installs OWASP ModSecurity Core Rule Set (CRS)
* Installs tar
* Unarchives the CRS
* Links the CRS | - | -| `apache_httpd:mods` | * Removes mods-available configs
* Create mods-available configs
* Disable mods
* Enable mods | Reloads httpd.service | -| `apache_httpd:state` | * Ensures that httpd service is in a desired state | - | -| `apache_httpd:vhosts` | * Removes sites-available vHosts
* Creates sites-available vHosts
* Creates DocumentRoot for all vHosts
* Disables vHosts
* Enables vHosts | Reloads httpd.service | +`apache_httpd` + +* Installs base packages and Apache packages/modules. +* Creates the `conf-available/conf-enabled`, `mods-available/mods-enabled`, `sites-available/sites-enabled` directory structure. +* Creates symlink for the log directory. +* Sets ownership on the document root (`chown -R apache:apache`). +* Hardens permissions on the config directory (`chmod -R g-w`). +* Ensures httpd service is in the desired state. +* Triggers: httpd.service reload. + +`apache_httpd:config` + +* Creates or updates the global Apache configuration (`httpd.conf`). +* Removes rpmnew/rpmsave files (and Debian equivalents). +* Removes, creates, disables, and enables conf-available configs. +* Triggers: httpd.service reload. + +`apache_httpd:htpasswd` + +* Creates or updates htpasswd flat-files for basic authentication. +* Triggers: none. + +`apache_httpd:matomo` + +* Deploys Matomo Log Analytics Python Script to `/usr/local/sbin/import_logs.py`. +* Triggers: none. + +`apache_httpd:mod_security_coreruleset` + +* Installs `tar`. +* Downloads and verifies the OWASP ModSecurity Core Rule Set (CRS). +* Extracts the archive and creates a symlink to the CRS directory. +* Copies the default `crs-setup.conf.example` to `crs-setup.conf`. +* Triggers: none. + +`apache_httpd:mods` -Tipp: +* Installs base packages and Apache packages/modules. +* Creates the `conf-available/conf-enabled`, `mods-available/mods-enabled`, `sites-available/sites-enabled` directory structure. +* Removes, creates, disables, and enables mods-available configs. +* Triggers: httpd.service reload. + +`apache_httpd:state` + +* Ensures httpd service is in the desired state. +* Triggers: none. + +`apache_httpd:vhosts` + +* Disables and removes sites-available vHosts. +* Creates DocumentRoot directories for all vHosts. +* Creates and enables sites-available vHosts. +* Supports `apache_httpd__limit_vhosts` to deploy only specific vHosts. +* Triggers: httpd.service reload. 
+ +Tip: * To deploy a single vHost only, supplement the `apache_httpd:vhosts` tag with the extra variable `--extra-vars='apache_httpd__limit_vhosts=["www.example.com"]'`. See [Optional Role Variables - Specific to this role](https://github.com/Linuxfabrik/lfops/tree/main/roles/apache_httpd#optional-role-variables---specific-to-this-role). +## Skip Variables + +This role is used in several playbooks that provide skip variables to disable specific dependencies. See the playbooks documentation for details: + +* [apache_httpd.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#apache_httpdyml) +* [setup_grav.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_gravyml) +* [setup_icinga2_master.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_icinga2_masteryml) +* [setup_librenms.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_librenmsyml) +* [setup_mastodon.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_mastodonyml) +* [setup_moodle.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_moodleyml) +* [setup_nextcloud.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_nextcloudyml) +* [setup_wordpress.yml](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/README.md#setup_wordpressyml) + + ## Mandatory Role Variables - Global Apache Config (core) -| Variable | Description | -| -------- | ----------- | -| `apache_httpd__conf_server_admin` | Mandatory, string. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#serveradmin) | +`apache_httpd__conf_server_admin` + +* See [ServerAdmin](https://httpd.apache.org/docs/2.4/mod/core.html#serveradmin). +* Type: String. 
Example: @@ -93,24 +164,108 @@ apache_httpd__conf_server_admin: 'webmaster@example.com' ## Optional Role Variables - Global Apache Config (core) -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__conf_add_default_charset` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#adddefaultcharset) | `'UTF-8'` | -| `apache_httpd__conf_document_root` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#documentroot) | `'/var/www/html'` | -| `apache_httpd__conf_enable_send_file` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#enablesendfile) | `'On'` | -| `apache_httpd__conf_error_log` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#errorlog) | `'syslog:local1'` | -| `apache_httpd__conf_hostname_lookups` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) | `'Off'` | -| `apache_httpd__conf_keep_alive` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#keepalive) | `'On'` | -| `apache_httpd__conf_keep_alive_timeout` | Number. CIS: Do not set it above `15` seconds.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#keepalivetimeout) | `5` | -| `apache_httpd__conf_limit_request_body` | Number. CIS: Do not set it above `102400`.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestbody) | `102400` | -| `apache_httpd__conf_limit_request_field_size` | Number. CIS: Do not set it above `1024` - but this might be too small for any modern application which sets cookies in its Header.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestfieldsize) | `8190` | -| `apache_httpd__conf_limit_request_fields` | Number. CIS: Do not set it above `100`.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestfields) | `100` | -| `apache_httpd__conf_limit_request_line` | Number. CIS: Do not set it above `512` - but this might be too small for any modern application which sets cookies in its Header.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestline) | `8190` | -| `apache_httpd__conf_log_level` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#loglevel) | `'warn'` | -| `apache_httpd__conf_max_keep_alive_requests` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#maxkeepaliverequests) | `500` | -| `apache_httpd__conf_server_name` | String. Mandatory, string. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#servername) | `'localhost'` | -| `apache_httpd__conf_timeout` | Number. CIS: Do not set it above `10` seconds.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#timeout) | `10` | -| `apache_httpd__conf_trace_enable` | String. CIS: Do not set it to `'On'`.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#traceenable) | `'Off'` | +`apache_httpd__conf_add_default_charset` + +* See [AddDefaultCharset](https://httpd.apache.org/docs/2.4/mod/core.html#adddefaultcharset). +* Type: String. +* Default: `'UTF-8'` + +`apache_httpd__conf_document_root` + +* See [DocumentRoot](https://httpd.apache.org/docs/2.4/mod/core.html#documentroot). +* Type: String. +* Default: `'/var/www/html'` + +`apache_httpd__conf_enable_send_file` + +* See [EnableSendfile](https://httpd.apache.org/docs/2.4/mod/core.html#enablesendfile). +* Type: String. +* Default: `'On'` + +`apache_httpd__conf_error_log` + +* See [ErrorLog](https://httpd.apache.org/docs/2.4/mod/core.html#errorlog). +* Type: String. +* Default: `'syslog:local1'` + +`apache_httpd__conf_hostname_lookups` + +* See [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups). +* Type: String. +* Default: `'Off'` + +`apache_httpd__conf_keep_alive` + +* See [KeepAlive](https://httpd.apache.org/docs/2.4/mod/core.html#keepalive). +* Type: String. +* Default: `'On'` + +`apache_httpd__conf_keep_alive_timeout` + +* See [KeepAliveTimeout](https://httpd.apache.org/docs/2.4/mod/core.html#keepalivetimeout). +* Type: Number. +* CIS: Do not set it above `15` seconds. +* Default: `5` + +`apache_httpd__conf_limit_request_body` + +* See [LimitRequestBody](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestbody). +* Type: Number. +* CIS: Do not set it above `102400`. +* Default: `102400` + +`apache_httpd__conf_limit_request_field_size` + +* See [LimitRequestFieldSize](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestfieldsize). +* Type: Number. +* CIS: Do not set it above `1024` - but this might be too small for any modern application which sets cookies in its Header. +* Default: `8190` + +`apache_httpd__conf_limit_request_fields` + +* See [LimitRequestFields](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestfields). +* Type: Number. 
+* CIS: Do not set it above `100`. +* Default: `100` + +`apache_httpd__conf_limit_request_line` + +* See [LimitRequestLine](https://httpd.apache.org/docs/2.4/mod/core.html#limitrequestline). +* Type: Number. +* CIS: Do not set it above `512` - but this might be too small for any modern application which sets cookies in its Header. +* Default: `8190` + +`apache_httpd__conf_log_level` + +* See [LogLevel](https://httpd.apache.org/docs/2.4/mod/core.html#loglevel). +* Type: String. +* Default: `'warn'` + +`apache_httpd__conf_max_keep_alive_requests` + +* See [MaxKeepAliveRequests](https://httpd.apache.org/docs/2.4/mod/core.html#maxkeepaliverequests). +* Type: Number. +* Default: `500` + +`apache_httpd__conf_server_name` + +* See [ServerName](https://httpd.apache.org/docs/2.4/mod/core.html#servername). +* Type: String. +* Default: `'localhost'` + +`apache_httpd__conf_timeout` + +* See [Timeout](https://httpd.apache.org/docs/2.4/mod/core.html#timeout). +* Type: Number. +* CIS: Do not set it above `10` seconds. +* Default: `10` + +`apache_httpd__conf_trace_enable` + +* See [TraceEnable](https://httpd.apache.org/docs/2.4/mod/core.html#traceenable). +* Type: String. +* CIS: Do not set it to `'On'`. +* Default: `'Off'` Example: @@ -137,17 +292,141 @@ apache_httpd__conf_trace_enable: 'Off' ## Optional Role Variables - Specific to this role -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__conf__group_var` /
`apache_httpd__conf__host_var` | List. List of dictionaries of `conf-available`/`conf-enabled` files.
Subkeys:
* `filename`: Mandatory, string. Destination filename in `conf-available/`, and normally is equal to the name of the source `template` used. Will be suffixed with `.conf`.
* `enabled`: boolean. Defaults to `true`. Creates a symlink to `conf-available/.conf` in `conf-enabled/` (`true`), otherwise the link is removed (`false`).
* `state`: string. `conf-available/.conf` is created (`present`), otherwise file is removed (`absent`).
* `template`: Mandatory, string. Name of the Jinja template source file to use.
See example below. | [Have a look](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) | -| `apache_httpd__htpasswd__group_var` /
`apache_httpd__htpasswd__host_var` | List of dictionaries containing used to create and update the flat-files used to store usernames and password for basic authentication of HTTP users. Subkeys:
  • `username`: Mandatory, string. Username.
  • `password`: Mandatory, string. Password.
  • `path`: Optional, string. Path to the htpasswd file. Defaults to `/etc/httpd/.htpasswd`.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
| `[]` | -| `apache_httpd__limit_vhosts` | List. Checks if the `conf_server_name` is in the list and only deploys those. Can be used on the CLI to speed up the deployment on large proxy servers, e.g. `--extra-vars='apache_httpd__limit_vhosts=["test.example.com"]'`.| unset | -| `apache_httpd__mods__group_var` / `apache_httpd__mods__host_var` | List. List of dictionaries of `mods-available`/`mods-enabled` files.
Subkeys:
* `filename`: Mandatory, string. Destination filename in `mods-available/`, and normally is equal to the name of the source `template` used. Will be suffixed with `.conf`.
* `enabled`: boolean. Defaults to `true`. Creates a symlink to `mods-available/.mods` in `mods-enabled/` (`true`), otherwise the link is removed (`false`).
* `state`: string. `mods-available/.conf` is created (`present`), otherwise file is removed (`absent`).
* `template`: string. Name of the Ansible Jinja template source file to use. If ommited, `filename` is used.
See example below. | [Have a look](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) | -| `apache_httpd__packages__group_var` / `apache_httpd__packages__host_var` | List. List of dictionaries of packages to install, related to Apache, using the OS package manager. Possible options:
* `name`: Mandatory, string. The package name.
* `state`: Mandatory, string. State of the package, one of `present`, `absent`. Packages are removed first and then added. | [Have a look](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) | -| `apache_httpd__skip_document_root_chown` | Boolean. Set to true to skip the `chown -R apache:apache` of the document root. | `false` | -| `apache_httpd__skip_php_fpm` | Boolean. Skip PHP configuration globally and in each vHost within Apache. | `false` | -| `apache_httpd__systemd_enabled` | Boolean. Whether the Apache webserver service should start on boot (`true`) or not (`false`). | `true` | -| `apache_httpd__systemd_state` | String. Make sure Apache webserver service is in a specific state. Possible options:
* `reloaded`
* `restarted`
* `started`
* `stopped` | `'started'` | +`apache_httpd__conf__group_var` / `apache_httpd__conf__host_var` + +* conf-available/conf-enabled files. See example below. +* Type: List of dictionaries. +* Default: See [defaults/main.yml](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) + +* Subkeys: + + * `enabled`: + + * Optional. Creates a symlink to conf-available/filename.conf in conf-enabled (true), otherwise the link is removed (false). + * Type: Bool. + * Default: `true` + + * `filename`: + + * Mandatory. Destination filename in conf-available/, normally equal to the name of the source template used. Suffixed with `.conf`. + * Type: String. + + * `state`: + + * Optional. conf-available/filename.conf is created (`present`), otherwise file is removed (`absent`). + * Type: String. + + * `template`: + + * Mandatory. Name of the Jinja template source file to use. + * Type: String. + +`apache_httpd__htpasswd__group_var` / `apache_httpd__htpasswd__host_var` + +* Create and update flat-files for basic authentication of HTTP users. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `password`: + + * Mandatory. Password. + * Type: String. + + * `path`: + + * Optional. Path to the htpasswd file. + * Type: String. + * Default: `'/etc/httpd/.htpasswd'` + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + + * `username`: + + * Mandatory. Username. + * Type: String. + +`apache_httpd__limit_vhosts` + +* Checks if the `conf_server_name` is in the list and only deploys those. Can be used on the CLI to speed up the deployment on large proxy servers, e.g. `--extra-vars='apache_httpd__limit_vhosts=["test.example.com"]'`. +* Type: List. +* Default: unset + +`apache_httpd__mods__group_var` / `apache_httpd__mods__host_var` + +* mods-available/mods-enabled files. See example below. +* Type: List of dictionaries. 
+* Default: See [defaults/main.yml](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) + +* Subkeys: + + * `enabled`: + + * Optional. Creates a symlink to mods-available/filename.conf in mods-enabled (true), otherwise the link is removed (false). + * Type: Bool. + * Default: `true` + + * `filename`: + + * Mandatory. Destination filename in mods-available/, normally equal to the name of the source template used. Suffixed with `.conf`. + * Type: String. + + * `state`: + + * Optional. mods-available/filename.conf is created (`present`), otherwise file is removed (`absent`). + * Type: String. + + * `template`: + + * Optional. Name of the Ansible Jinja template source file to use. If omitted, `filename` is used. + * Type: String. + +`apache_httpd__packages__group_var` / `apache_httpd__packages__host_var` + +* Packages to install using the OS package manager. Packages are removed first and then added. +* Type: List of dictionaries. +* Default: See [defaults/main.yml](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) + +* Subkeys: + + * `name`: + + * Mandatory. The package name. + * Type: String. + + * `state`: + + * Optional. State of the package, one of `present`, `absent`. + * Type: String. + +`apache_httpd__skip_document_root_chown` + +* Set to true to skip the `chown -R apache:apache` of the document root. +* Type: Bool. +* Default: `false` + +`apache_httpd__skip_php_fpm` + +* Skip PHP-FPM configuration globally and in each vHost within Apache. When set to `false` (default), the role automatically injects PHP-FPM `ProxyPass` directives into app, localhost, and wordpress vHosts. +* Type: Bool. +* Default: `false` + +`apache_httpd__systemd_enabled` + +* Whether the Apache webserver service should start on boot (true) or not (false). +* Type: Bool. +* Default: `true` + +`apache_httpd__systemd_state` + +* Make sure Apache webserver service is in a specific state.
Possible options: `reloaded`, `restarted`, `started`, `stopped`. +* Type: String. +* Default: `'started'` Example: ```yaml @@ -184,12 +463,16 @@ apache_httpd__systemd_state: 'started' ## Mandatory Role Variables - vHosts -`apache_httpd__vhosts__group_var` / `apache_httpd__vhosts__host_var`: +`apache_httpd__vhosts__group_var` / `apache_httpd__vhosts__host_var` -| Variable | Description | -| -------- | ----------- | -| `conf_server_name` | Mandatory, string. Set this variable for each vHost definition. Although this is just best practise, we would never use a vHost without a ServerName. | +* vHost definitions for Apache. See the "Optional Role Variables - vHosts" section below for all available subkeys. +* Type: List of dictionaries. +* Subkeys: + * `conf_server_name`: + + * Mandatory. Set this variable for each vHost definition. Although this is just best practice, we would never use a vHost without a ServerName. + * Type: String. Example: ```yaml @@ -207,73 +490,249 @@ Using `apache_httpd__vhosts__group_var` or `apache_httpd__vhosts__host_var` (whi Types of vHosts: -* **app**
- A hardened vHost running an application like Nextcloud, Wordpress etc. with the most common options. Can be extended by using the `raw` variable.
-* **localhost**
- A hardened, pre-defined VirtualHost just listening on https://localhost, and only accessible from localhost. Due to its naming, it is the first defined vHost. Useful for
Apache status info etc. Can be extended by using the `raw` variable. The following URLs are pre-configured, accessible just from localhost: `/fpm-ping`, `/fpm-status`, `/monitoring.php`, `/server-info`, `/server-status`. -* **proxy**
- A typical hardened reverse proxy vHost. Can be extended by using the `raw` variable. This proxy vHost definition prevents Apache from functioning as a forward proxy
server (inside > out). -* **redirect**
- A vHost that redirects from one port (default "80") to another (default "443"). Custom redirect rules can be provided using the `raw` variable.
-* **raw**
- If none of the above vHost templates fit, use the `raw` one and define everything except `<VirtualHost>` and `</VirtualHost>` completely from scratch.
-* **wordpress**
- A special vHost just for deploying WordPress instances. +* **app**: A hardened vHost running an application like Nextcloud, Wordpress etc. with the most common options. Can be extended by using the `raw` variable. +* **localhost**: A hardened, pre-defined VirtualHost just listening on https://localhost, and only accessible from localhost. Due to its naming, it is the first defined vHost. Can be extended by using the `raw` variable. The following URLs are pre-configured and only accessible from localhost: + + * `/fpm-ping` - PHP-FPM health check + * `/fpm-status` - PHP-FPM status page + * `/monitoring.php` - Linuxfabrik monitoring endpoint + * `/server-info` - Apache server info (`mod_info` required) + * `/server-status` - Apache server status (`mod_status` required) +* **proxy**: A typical hardened reverse proxy vHost. Can be extended by using the `raw` variable. This proxy vHost definition prevents Apache from functioning as a forward proxy server (inside > out). +* **raw**: If none of the above vHost templates fit, use the `raw` one and define everything except `<VirtualHost>` and `</VirtualHost>` completely from scratch. +* **redirect**: A vHost that redirects from one port (default "80") to another (default "443"). Custom redirect rules can be provided using the `raw` variable. +* **wordpress**: A special vHost just for deploying WordPress instances. "Hardened" means among other things: * Old HTTP protocol (< HTTP/1.1) versions are disallowed. * IP address based requests are disallowed. * Number of bytes that are allowed in a request are limited. -* etc. - -This role creates a vHost named `localhost` by default. Have a look at the [defaults/main.yml](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) | - -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `allow_accessing_dotfiles` | Boolean. app-vHosts block access to files that begin with a period. With this setting you can disable this behavior.
| `false` | -| `allow_requests_without_hostname` | Boolean. app-vHosts forbid accessing them without a hostname / just by IP. With this setting you can disable this behavior. | `false` | -| `allowed_file_extensions` | List. app- and localhost-vHosts block ALL file extensions by default (including `.gitignore`, `.svn`, `.htaccess`, `.hg`, `.bzr` etc.), unless specifically allowed. Use `find {{ apache_httpd__conf_document_root }} -type f -name '*.*' \| awk -F. '{print $NF }' \| sort --unique \| sed -e 's/^/- \x27/' -e 's/$/\x27/'` to compile a list of the file extensions that are currently present in your application. Note: The vHost templates already ensure that files and folders starting with a dot (".") are forbidden. Use `skip_allowed_file_extensions` to allow all file extensions. | * app: `['css', 'gif', 'html?', 'ico', 'jpe?g', 'js', 'pdf', 'php', 'png', 'svg', 'ttf', 'txt', 'woff2?']`
* localhost: `['css', 'gif', 'html?', 'ico', 'jpe?g', 'js', 'pdf', 'php', 'png', 'svg', 'ttf', 'txt', 'woff2?']`
| -| `allowed_http_methods` | List. Should be used to disable unwanted [HTTP methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). Only the explicity listed ones are allowed. Use `skip_allowed_http_methods` to allow all HTTP methods. Returns a [405 - Method Not Allowed](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes) if a forbidden HTTP method is used.
* This does not disable TRACE.
* Always enable GET and OPTIONS at least. For an OPTIONS request, Apache always returns `Allow: GET,POST,OPTIONS,HEAD`, no matter what.
* We are NOT using [LimitExcept](https://httpd.apache.org/docs/2.4/mod/core.html#limitexcept), because this directive is not allowed in a VirtualHost context.

Available HTTP methods:
* CONNECT
* DELETE
* GET
* HEAD
* OPTIONS
* PATCH
* POST
* PUT

Available WebDAV methods:
* COPY
* LOCK
* MKCOL
* MOVE
* PROPFIND
* PROPPATCH
* UNLOCK | * app: `['GET', 'OPTIONS']`
* localhost: `['GET', 'OPTIONS']`
* proxy: `['GET', 'OPTIONS']` | -| `authz_document_root` | String. Authorization statement for the `DocumentRoot {{ apache_httpd__conf_document_root }}/{{ conf_server_name }}` directive. | * app: `'Require all granted'`
* localhost: `'Require all granted'` | -| `by_role` | String. If defined it results in a comment `# Generated by Ansible role: {{ by_role }}` at the beginning of a vHost definition. | * app: unset
* localhost: unset
* proxy: unset
* raw: unset
* redirect: unset | -| `comment` | String. Describes the vHost and results in a comment right above the `` section. | * app: `'no description available'`
* localhost: `'no description available'`
* proxy: `'no description available'`
* raw: `'no description available'` | -| `conf_allow_override` | String. Will be set in the `` directive of the vHost.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#allowoverride) | * app: `'None'`
* localhost: `'None'` | -| `conf_custom_log` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_log_config.html#customlog). The log format has to be of
* `agent`
* `combined`
* `combinedio`
* `common`
* `debug`
* `fail2ban`
* `linuxfabrikio`
* `matomo`
* `referer`
* `vhost_common` | * app: `'logs/{{ conf_server_name }}-access.log linuxfabrikio`
* localhost: `'logs/{{ conf_server_name }}-access.log linuxfabrikio<`br> * proxy: `'logs/{{ conf_server_name }}-access.log linuxfabrikio` | -| `conf_directory_index` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex) | * app: `{{ apache_httpd__mod_dir_directory_index }}` | -| `conf_document_root` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#documentroot) | * app: `{{ apache_httpd__conf_document_root}}/{{ conf_server_name }}`
* localhost: `{{ apache_httpd__conf_document_root}}/{{ conf_server_name }}` | -| `conf_error_log` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#errorlog) | * app: `logs/{{ conf_server_name }}-error.log`
* localhost: `logs/{{ conf_server_name }}-error.log`
* proxy: `logs/{{ conf_server_name }}-error.log` | -| `conf_keep_alive_timeout` | Number. CIS: Do not set it above '15' seconds.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#keepalivetimeout) | * app: `5`
* localhost: `5`
* proxy: `5` | -| `conf_log_level` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#loglevel) | * app: `'notice core:info'`
* localhost: `'notice core:info'`
* proxy: `'notice core:info'` | -| `conf_options` | String. Sets the `Options` for the `` directive.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#options) | * app: `'None'`
* localhost: `'None'` | -| `conf_proxy_error_override` | String. If you want to have a common look and feel on the error pages seen by the end user, set this to "On" and define them on the reverse proxy server.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxyerroroverride) | * proxy: `'On'` | -| `conf_proxy_preserve_host` | String. When enabled, this option will pass the `Host:` line from the incoming request to the proxied host, instead of the hostname specified in the `ProxyPass` line.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxypreservehost) | `'Off'` | -| `conf_proxy_timeout` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxytimeout) | `5` | -| `conf_request_read_timeout` | Number. CIS:
* Do not set the Timeout Limits for Request Headers above 40.
* Do not set the Timeout Limits for the Request Body above 20.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_reqtimeout.html#requestreadtimeout) | * app: `'header=20-40,MinRate=500 body=20,MinRate=500'`
* localhost: `'header=20-40,MinRate=500 body=20,MinRate=500'`
* proxy: `'header=20-40,MinRate=500 body=20,MinRate=500'` | -| `conf_server_admin` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#serveradmin) | * app: `{{ apache_httpd__conf_server_admin }}`
* localhost: `{{ apache_httpd__conf_server_admin }}`
* proxy: `{{ apache_httpd__conf_server_admin }}` | -| `conf_server_alias` | List. Set this only if you need more than one `conf_server_name`.
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#serveralias) | * app: unset
* localhost: unset
* proxy: unset | -| `conf_server_name` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#servername) | * app: unset
* localhost: unset
* proxy: unset
* redirect: unset | -| `conf_timeout` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/core.html#timeout) | * app: `{{ apache_httpd__conf_timeout }}`
* localhost: `{{ apache_httpd__conf_timeout }}`
* proxy: `{{ apache_httpd__conf_timeout }}` | -| `enabled` | Boolean. Enable this vHost. | `true` | -| `filename` | String. The filename of the vHost definition. If not set it defaults to the `conf_server_name` variable. If not set, the filename is automatically suffixed by `.virtualhost_port.conf`. | conf_server_name.virtualhost_port.conf | -| `php_set_handler` | String. Set the handler for PHP
* socket-based: `SetHandler "proxy:unix:/run/php-fpm/www.sock\|fcgi://localhost"`
* network-based: `SetHandler "proxy:fcgi://127.0.0.1:9000/"` | * app: `'SetHandler "proxy:unix:/run/php-fpm/www.sock\|fcgi://localhost"'`
* localhost: `'SetHandler "proxy:unix:/run/php-fpm/www.sock\|fcgi://localhost"'` | -| `raw` | String. It is sometimes desirable to pass variable content that Jinja would handle as variables or blocks. Jinja's `{% raw %}` statement does not work in Ansible. The best and safest solution is to declare `raw` variables as `!unsafe`, to prevent templating errors and information disclosure. | * app: unset
* localhost: unset
* proxy: unset
* raw: unset
* redirect: unset | -| `skip_allowed_file_extensions` | Boolean. Skips checking file extensions in app- and localhost-vHosts, allowing essentially all file extensions. | `false` | -| `skip_allowed_http_methods` | Boolean. Skips checking the HTTP methods in app-, localhost-, proxy-, wordpress-vHosts, allowing essentially all HTTP methods. | `false` | -| `state` | String. Should the vhost definition file be created (`present`) or deleted (`absent`). | * app: unset
* localhost: `'present'`
* proxy: unset
* raw: unset
* redirect: unset | -| `template` | String. Have a look at the intro of this paragraph. | unset | -| `virtualhost_ip` | String. Used within the `<VirtualHost>` directive. | * app: `'*'`
* localhost: `'*'`
* proxy: `'*'`
* raw: `'*'`
* redirect: `'*'` | -| `virtualhost_port` | Number. Used within the `<VirtualHost>` directive. | * app: `443`
* localhost: `443`
* proxy: `443`
* raw: `443`
* redirect: `80` | - -Example: [Have a look here](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/EXAMPLES.md). +* Access to dotfiles (files starting with `.`) is blocked, except `.well-known` (for ACME/Let's Encrypt challenges). +* Forbidden HTTP methods return a `405 Method Not Allowed` via `RewriteRule`. + +This role creates a vHost named `localhost` by default. See [defaults/main.yml](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/defaults/main.yml) + +`allow_accessing_dotfiles` + +* app-vHosts block access to files that begin with a period. With this setting you can disable this behavior. +* Type: Bool. +* Default: `false` + +`allow_requests_without_hostname` + +* app-vHosts forbid accessing them without a hostname / just by IP. With this setting you can disable this behavior. +* Type: Bool. +* Default: `false` + +`allowed_file_extensions` + +* app- and localhost-vHosts block ALL file extensions by default, unless specifically allowed. The patterns use Apache regex syntax (e.g. `html?` matches both `html` and `htm`, `jpe?g` matches both `jpeg` and `jpg`). Files and folders starting with a dot are always forbidden. Use `skip_allowed_file_extensions` to allow all file extensions. +* To compile a list of file extensions present in your application, run: + `find {{ apache_httpd__conf_document_root }} -type f -name '*.*' | awk -F. '{print $NF }' | sort --unique | sed -e 's/^/- \x27/' -e 's/$/\x27/'` +* Type: List. +* Default: app/localhost `['css', 'gif', 'html?', 'ico', 'jpe?g', 'js', 'pdf', 'php', 'png', 'svg', 'ttf', 'txt', 'woff2?']` + +`allowed_http_methods` + +* Restrict allowed [HTTP methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). Only the explicitly listed ones are allowed; all others return [405 Method Not Allowed](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). This does not disable TRACE. Always enable GET and OPTIONS at least. 
For an OPTIONS request, Apache always returns `Allow: GET,POST,OPTIONS,HEAD`, no matter what. We are NOT using [LimitExcept](https://httpd.apache.org/docs/2.4/mod/core.html#limitexcept), because this directive is not allowed in a VirtualHost context. Use `skip_allowed_http_methods` to allow all HTTP methods. +* Available HTTP methods: + + * CONNECT, DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT + +* Available WebDAV methods: + + * COPY, LOCK, MKCOL, MOVE, PROPFIND, PROPPATCH, UNLOCK +* Type: List. +* Default: app/localhost/proxy `['GET', 'OPTIONS']` + +`authz_document_root` + +* Authorization statement for the `DocumentRoot {{ apache_httpd__conf_document_root }}/{{ conf_server_name }}` directive. +* Type: String. +* Default: app/localhost `'Require all granted'` + +`by_role` + +* If defined it results in a comment `# Generated by Ansible role: {{ by_role }}` at the beginning of a vHost definition. +* Type: String. +* Default: unset + +`comment` + +* Describes the vHost and results in a comment right above the `` section. +* Type: String. +* Default: `'no description available'` + +`conf_allow_override` + +* Will be set in the `` directive of the vHost. See [AllowOverride](https://httpd.apache.org/docs/2.4/mod/core.html#allowoverride). +* Type: String. +* Default: app/localhost `'None'` + +`conf_custom_log` + +* The log format has to be one of: `agent`, `combined`, `combinedio`, `common`, `debug`, `fail2ban`, `linuxfabrikio`, `matomo`, `referer`, `vhost_common`. See [CustomLog](https://httpd.apache.org/docs/2.4/mod/mod_log_config.html#customlog). +* Type: String. +* Default: app/localhost/proxy `'logs/{{ conf_server_name }}-access.log linuxfabrikio'` + +`conf_directory_index` + +* See [DirectoryIndex](https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex). +* Type: String. +* Default: app `{{ apache_httpd__mod_dir_directory_index }}` + +`conf_document_root` + +* See [DocumentRoot](https://httpd.apache.org/docs/2.4/mod/core.html#documentroot). 
+* Type: String. +* Default: app/localhost `'{{ apache_httpd__conf_document_root }}/{{ conf_server_name }}'` + +`conf_error_log` + +* See [ErrorLog](https://httpd.apache.org/docs/2.4/mod/core.html#errorlog). +* Type: String. +* Default: app/localhost/proxy `'logs/{{ conf_server_name }}-error.log'` + +`conf_keep_alive_timeout` + +* See [KeepAliveTimeout](https://httpd.apache.org/docs/2.4/mod/core.html#keepalivetimeout). +* Type: Number. +* CIS: Do not set it above `15` seconds. +* Default: `5` + +`conf_log_level` + +* See [LogLevel](https://httpd.apache.org/docs/2.4/mod/core.html#loglevel). +* Type: String. +* Default: `'notice core:info'` + +`conf_options` + +* Sets the `Options` for the `` directive. See [Options](https://httpd.apache.org/docs/2.4/mod/core.html#options). +* Type: String. +* Default: app/localhost `'None'` + +`conf_proxy_error_override` + +* If you want to have a common look and feel on the error pages seen by the end user, set this to "On" and define them on the reverse proxy server. See [ProxyErrorOverride](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxyerroroverride). +* Type: String. +* Default: proxy `'On'` + +`conf_proxy_preserve_host` + +* When enabled, this option will pass the `Host:` line from the incoming request to the proxied host, instead of the hostname specified in the `ProxyPass` line. See [ProxyPreserveHost](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxypreservehost). +* Type: String. +* Default: `'Off'` + +`conf_proxy_timeout` + +* See [ProxyTimeout](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxytimeout). +* Type: Number. +* Default: `5` + +`conf_request_read_timeout` + +* See [RequestReadTimeout](https://httpd.apache.org/docs/2.4/mod/mod_reqtimeout.html#requestreadtimeout). +* Type: Number. +* CIS: Do not set the Timeout Limits for Request Headers above 40. Do not set the Timeout Limits for the Request Body above 20. 
+* Default: `'header=20-40,MinRate=500 body=20,MinRate=500'` + +`conf_server_admin` + +* See [ServerAdmin](https://httpd.apache.org/docs/2.4/mod/core.html#serveradmin). +* Type: String. +* Default: `{{ apache_httpd__conf_server_admin }}` + +`conf_server_alias` + +* Set this only if you need more than one `conf_server_name`. See [ServerAlias](https://httpd.apache.org/docs/2.4/mod/core.html#serveralias). +* Type: List. +* Default: unset + +`conf_server_name` + +* See [ServerName](https://httpd.apache.org/docs/2.4/mod/core.html#servername). +* Type: String. +* Default: unset + +`conf_timeout` + +* See [Timeout](https://httpd.apache.org/docs/2.4/mod/core.html#timeout). +* Type: Number. +* Default: `{{ apache_httpd__conf_timeout }}` + +`enabled` + +* Enable this vHost. +* Type: Bool. +* Default: `true` + +`filename` + +* The filename of the vHost definition. If not set it defaults to the `conf_server_name` variable. The filename is automatically suffixed by `.virtualhost_port.conf`. +* Type: String. +* Default: `conf_server_name.virtualhost_port.conf` + +`php_set_handler` + +* Set the handler for PHP. Socket-based: `SetHandler "proxy:unix:/run/php-fpm/www.sock|fcgi://localhost"`. Network-based: `SetHandler "proxy:fcgi://127.0.0.1:9000/"`. +* Type: String. +* Default: app/localhost `'SetHandler "proxy:unix:/run/php-fpm/www.sock|fcgi://localhost"'` + +`raw` + +* It is sometimes desirable to pass variable content that Jinja would handle as variables or blocks. The best and safest solution is to declare `raw` variables as `!unsafe`, to prevent templating errors and information disclosure. +* Type: String. +* Default: unset + +`skip_allowed_file_extensions` + +* Skips checking file extensions in app- and localhost-vHosts, allowing essentially all file extensions. +* Type: Bool. +* Default: `false` + +`skip_allowed_http_methods` + +* Skips checking the HTTP methods in app-, localhost-, proxy-, wordpress-vHosts, allowing essentially all HTTP methods. +* Type: Bool. 
+* Default: `false` + +`state` + +* Should the vhost definition file be created (`present`) or deleted (`absent`). +* Type: String. +* Default: localhost `'present'`, others unset + +`template` + +* See the "Types of vHosts" section above. +* Type: String. +* Default: unset + +`virtualhost_ip` + +* Used within the `` directive. +* Type: String. +* Default: `'*'` + +`virtualhost_port` + +* Used within the `` directive. +* Type: Number. +* Default: `443`, redirect `80` + +Example: See [EXAMPLES.md](https://github.com/Linuxfabrik/lfops/blob/main/roles/apache_httpd/EXAMPLES.md). ## Optional Role Variables - mod_dir -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mod_dir_directory_index` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex) | `'index.html index.htm index.txt'` | +`apache_httpd__mod_dir_directory_index` + +* See [DirectoryIndex](https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex). +* Type: String. +* Default: `'index.html index.htm index.txt'` Example: @@ -287,9 +746,11 @@ apache_httpd__mod_dir_directory_index: 'index.html' This module is for flexible logging of client requests. Logs are written in a customizable format, and may be written directly to a file, or to an external program. Conditional logging is provided so that individual requests may be included or excluded from the logs based on characteristics of the request. -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mod_log_config_custom_log` | String. One of
* `agent`
* `combined`
* `combinedio`
* `common`
* `debug`
* `fail2ban`
* `linuxfabrikio`
* `matomo`
* `referer`
* `vhost_common`
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_log_config.html#customlog) | unset | +`apache_httpd__mod_log_config_custom_log` + +* Global log directive that applies to requests not handled by any vHost. Each vHost defines its own log via `conf_custom_log`. One of: `agent`, `combined`, `combinedio`, `common`, `debug`, `fail2ban`, `linuxfabrikio`, `matomo`, `referer`, `vhost_common`. See [CustomLog](https://httpd.apache.org/docs/2.4/mod/mod_log_config.html#customlog). +* Type: String. +* Default: unset Example: ```yaml @@ -300,11 +761,23 @@ apache_httpd__mod_log_config_custom_log: 'logs/access.log combined' ## Optional Role Variables - mod_security (security2) -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mod_security_coreruleset_url` | String. The OWASP ModSecurity Core Rule Set (CRS) Download URL. Change this if you are running your own mirror servers. | `'https://github.com/coreruleset/coreruleset/archive'` | -| `apache_httpd__mod_security_coreruleset_version` | String. The OWASP ModSecurity Core Rule Set (CRS) version number without "v". | `'4.4.0'` | -| `apache_httpd__skip_mod_security_coreruleset` | Boolean. Skip the installation of the OWASP ModSecurity Core Rule Set (CRS). | `true` | +`apache_httpd__mod_security_coreruleset_url` + +* The OWASP ModSecurity Core Rule Set (CRS) Download URL. Change this if you are running your own mirror servers. +* Type: String. +* Default: `'https://github.com/coreruleset/coreruleset/archive'` + +`apache_httpd__mod_security_coreruleset_version` + +* The OWASP ModSecurity Core Rule Set (CRS) version number without "v". +* Type: String. +* Default: `'4.24.1'` + +`apache_httpd__skip_mod_security_coreruleset` + +* Skip the installation of the OWASP ModSecurity Core Rule Set (CRS). +* Type: Bool. 
+* Default: `true` Example: ```yaml @@ -317,9 +790,11 @@ apache_httpd__skip_mod_security_coreruleset: true ## Optional Role Variables - mod_ssl -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mod_ssl_ssl_use_stapling` | String. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslusestapling) | `'on'` | +`apache_httpd__mod_ssl_ssl_use_stapling` + +* See [SSLUseStapling](https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslusestapling). +* Type: String. +* Default: `'on'` Example: ```yaml @@ -330,9 +805,11 @@ apache_httpd__mod_ssl_ssl_use_stapling: 'on' ## Optional Role Variables - mpm_common -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mpm_common_listen` | List of numbers or strings. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#listen) | `[80]` | +`apache_httpd__mpm_common_listen` + +* See [Listen](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#listen). +* Type: List of numbers or strings. +* Default: `[80]` Example: @@ -358,16 +835,47 @@ threads will make for better resource utilization and performance. Best for PHP-FPM. Default. +`apache_httpd__mpm_event_max_connections_per_child` + +* See [MaxConnectionsPerChild](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild). +* Type: Number. +* Default: `0` -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mpm_event_max_connections_per_child` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild) | `0` | -| `apache_httpd__mpm_event_max_request_workers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers) | `400` | -| `apache_httpd__mpm_event_max_spare_threads` | Number. 
[Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxsparethreads) | `250` | -| `apache_httpd__mpm_event_min_spare_threads` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#minsparethreads) | `75` | -| `apache_httpd__mpm_event_start_servers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers) | `3` | -| `apache_httpd__mpm_event_thread_limit` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadlimit) | `64` | -| `apache_httpd__mpm_event_threads_per_child` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadsperchild) | `25` | +`apache_httpd__mpm_event_max_request_workers` + +* See [MaxRequestWorkers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers). +* Type: Number. +* Default: `400` + +`apache_httpd__mpm_event_max_spare_threads` + +* See [MaxSpareThreads](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxsparethreads). +* Type: Number. +* Default: `250` + +`apache_httpd__mpm_event_min_spare_threads` + +* See [MinSpareThreads](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#minsparethreads). +* Type: Number. +* Default: `75` + +`apache_httpd__mpm_event_start_servers` + +* See [StartServers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers). +* Type: Number. +* Default: `3` + +`apache_httpd__mpm_event_thread_limit` + +* See [ThreadLimit](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadlimit). +* Type: Number. +* Default: `64` + +`apache_httpd__mpm_event_threads_per_child` + +* See [ThreadsPerChild](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadsperchild). +* Type: Number. +* Default: `25` Example: @@ -398,13 +906,35 @@ This MPM is very self-regulating, so it is rarely necessary to adjust its config Best for Standard PHP running any version of `mod_php`. Does not work with http2. 
-| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mpm_prefork_max_connections_per_child` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild) | `0` | -| `apache_httpd__mpm_prefork_max_request_workers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers) | `256` | -| `apache_httpd__mpm_prefork_max_spare_threads` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxsparethreads) | `10` | -| `apache_httpd__mpm_prefork_min_spare_threads` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#minsparethreads) | `5` | -| `apache_httpd__mpm_prefork_start_servers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers) | `5` | +`apache_httpd__mpm_prefork_max_connections_per_child` + +* See [MaxConnectionsPerChild](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild). +* Type: Number. +* Default: `0` + +`apache_httpd__mpm_prefork_max_request_workers` + +* See [MaxRequestWorkers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers). +* Type: Number. +* Default: `256` + +`apache_httpd__mpm_prefork_max_spare_servers` + +* See [MaxSpareServers](https://httpd.apache.org/docs/2.4/mod/prefork.html#maxspareservers). +* Type: Number. +* Default: `10` + +`apache_httpd__mpm_prefork_min_spare_servers` + +* See [MinSpareServers](https://httpd.apache.org/docs/2.4/mod/prefork.html#minspareservers). +* Type: Number. +* Default: `5` + +`apache_httpd__mpm_prefork_start_servers` + +* See [StartServers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers). +* Type: Number. 
+* Default: `5` Example: ```yaml @@ -432,15 +962,47 @@ The most important directives used to control this MPM are `apache_httpd__mpm_wo Best for mod_qos if you intend to use any connection level control directive ("QS_Srv\*"), which is normally done on a Reverse Proxy. Works with PHP-FPM, too. -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__mpm_worker_max_connections_per_child` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild) | `0` | -| `apache_httpd__mpm_worker_max_request_workers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers) | `400` | -| `apache_httpd__mpm_worker_max_spare_threads` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxsparethreads) | `250` | -| `apache_httpd__mpm_worker_min_spare_threads` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#minsparethreads) | `75` | -| `apache_httpd__mpm_worker_start_servers` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers) | `3` | -| `apache_httpd__mpm_worker_thread_limit` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadlimit) | `64` | -| `apache_httpd__mpm_worker_threads_per_child` | Number. [Apache Directive](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadsperchild) | `25` | +`apache_httpd__mpm_worker_max_connections_per_child` + +* See [MaxConnectionsPerChild](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxconnectionsperchild). +* Type: Number. +* Default: `0` + +`apache_httpd__mpm_worker_max_request_workers` + +* See [MaxRequestWorkers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers). +* Type: Number. 
+* Default: `400` + +`apache_httpd__mpm_worker_max_spare_threads` + +* See [MaxSpareThreads](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxsparethreads). +* Type: Number. +* Default: `250` + +`apache_httpd__mpm_worker_min_spare_threads` + +* See [MinSpareThreads](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#minsparethreads). +* Type: Number. +* Default: `75` + +`apache_httpd__mpm_worker_start_servers` + +* See [StartServers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers). +* Type: Number. +* Default: `3` + +`apache_httpd__mpm_worker_thread_limit` + +* See [ThreadLimit](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadlimit). +* Type: Number. +* Default: `64` + +`apache_httpd__mpm_worker_threads_per_child` + +* See [ThreadsPerChild](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#threadsperchild). +* Type: Number. +* Default: `25` Example: @@ -458,11 +1020,31 @@ apache_httpd__mpm_worker_threads_per_child: 25 ## Optional Role Variables - wsgi_python3_module -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_httpd__wsgi_python_home` | String. [Apache Directive](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonHome.html) | `/opt/python` | -| `apache_httpd__wsgi_python_path` | String. [Apache Directive](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonPath.html) | `/var/www/html/python/` | -| `apache_httpd__wsgi_script_alias` | String. [Apache Directive](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIScriptAlias.html) | `/ /var/www/html/python/index.py` | +`apache_httpd__wsgi_python_home` + +* See [WSGIPythonHome](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonHome.html). +* Type: String. 
+* Default: `'/opt/python'` + +`apache_httpd__wsgi_python_path` + +* See [WSGIPythonPath](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonPath.html). +* Type: String. +* Default: `'/var/www/html/python/'` + +`apache_httpd__wsgi_script_alias` + +* See [WSGIScriptAlias](https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIScriptAlias.html). +* Type: String. +* Default: `'/ /var/www/html/python/index.py'` + +Example: +```yaml +# optional - wsgi_python3_module +apache_httpd__wsgi_python_home: '/opt/python' +apache_httpd__wsgi_python_path: '/var/www/html/python/' +apache_httpd__wsgi_script_alias: '/ /var/www/html/python/index.py' +``` diff --git a/roles/apache_httpd/tasks/confs.yml b/roles/apache_httpd/tasks/confs.yml index fe72acf62..e50ef9ed1 100644 --- a/roles/apache_httpd/tasks/confs.yml +++ b/roles/apache_httpd/tasks/confs.yml @@ -25,6 +25,7 @@ - name: 'Create conf-available configs' ansible.builtin.template: + backup: true src: 'etc/httpd/conf-available/{{ item["template"]}}.conf.j2' dest: '{{ apache_httpd__config_path }}/{{ apache_httpd__config_prefix }}conf-available/{{ item["filename"] }}.conf' owner: 'root' diff --git a/roles/apache_httpd/tasks/main.yml b/roles/apache_httpd/tasks/main.yml index bf116350e..0efb7fd9e 100644 --- a/roles/apache_httpd/tasks/main.yml +++ b/roles/apache_httpd/tasks/main.yml @@ -82,6 +82,7 @@ - name: 'Create or update global Apache configuration {{ apache_httpd__config_file }}' ansible.builtin.template: + backup: true src: 'etc/httpd/conf/httpd.conf.j2' dest: '{{ apache_httpd__config_file }}' owner: 'root' diff --git a/roles/apache_httpd/tasks/matomo.yml b/roles/apache_httpd/tasks/matomo.yml index 563c1f35a..249b30b65 100644 --- a/roles/apache_httpd/tasks/matomo.yml +++ b/roles/apache_httpd/tasks/matomo.yml @@ -1,5 +1,6 @@ - name: 'Deploy Matomo Log Analytics Python Script to /usr/local/sbin/import_logs.py' ansible.builtin.template: + backup: true src: 'usr/local/sbin/import_logs.py.j2' 
dest: '/usr/local/sbin/import_logs.py' owner: '{{ apache_httpd__user }}' diff --git a/roles/apache_httpd/tasks/mods.yml b/roles/apache_httpd/tasks/mods.yml index 751c70228..6b9b16a9b 100644 --- a/roles/apache_httpd/tasks/mods.yml +++ b/roles/apache_httpd/tasks/mods.yml @@ -25,6 +25,7 @@ - name: 'Create mods-available configs' ansible.builtin.template: + backup: true src: 'etc/httpd/mods-available/{{ item["template"] | d(item["filename"]) }}.conf.j2' dest: '{{ apache_httpd__config_path }}/{{ apache_httpd__config_prefix }}mods-available/{{ item["filename"] }}.conf' owner: 'root' diff --git a/roles/apache_httpd/tasks/vhosts.yml b/roles/apache_httpd/tasks/vhosts.yml index 80b96ff01..785ed750b 100644 --- a/roles/apache_httpd/tasks/vhosts.yml +++ b/roles/apache_httpd/tasks/vhosts.yml @@ -47,6 +47,7 @@ - name: 'Create sites-available vHosts' ansible.builtin.template: + backup: true src: 'etc/httpd/sites-available/{{ item["template"] }}.conf.j2' dest: '{{ apache_httpd__config_path }}/{{ apache_httpd__config_prefix }}sites-available/{{ item["filename"] | d(item["conf_server_name"] ~ "." ~ (item["virtualhost_port"] | d(443))) }}.conf' owner: 'root' diff --git a/roles/apache_solr/README.md b/roles/apache_solr/README.md index 6b4af6208..473e79931 100644 --- a/roles/apache_solr/README.md +++ b/roles/apache_solr/README.md @@ -15,21 +15,36 @@ This Ansible role If you use the [Apache Solr Playbook](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/apache_solr.yml), this is automatically done for you. 
+ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `apache_solr` | Installs and configures the whole Apache Solr server and deploys `bin/solr.in.sh`, `log4j.xml.j2` and `security.json` | Restarts solr.service | -| `apache_solr:state` | Manages the state of `solr.service` | - | -| `apache_solr:user` | Generates hashed passwords and deploys `security.json` | Restarts solr.service | +`apache_solr` + +* Installs and configures the whole Apache Solr server and deploys `bin/solr.in.sh`, `log4j.xml.j2` and `security.json`. +* Triggers: solr.service restart. + +`apache_solr:state` + +* Manages the state of `solr.service`. +* Triggers: none. + +`apache_solr:user` + +* Generates hashed passwords and deploys `security.json`. +* Triggers: solr.service restart. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `apache_solr__checksum` | String. The SHA512 checksum according to your version. See `solr-X.X.X.tgz.sha512` file at https://archive.apache.org/dist/solr/solr/ for Solr 9+, https://archive.apache.org/dist/lucene/solr/ for Solr 8-. | -| `apache_solr__version` | The version to install. See https://archive.apache.org/dist/solr/solr/ for Solr 9+, https://archive.apache.org/dist/lucene/solr/ for Solr 8-. | +`apache_solr__checksum` + +* The SHA512 checksum according to your version. See `solr-X.X.X.tgz.sha512` file at https://archive.apache.org/dist/solr/solr/ for Solr 9+, https://archive.apache.org/dist/lucene/solr/ for Solr 8-. +* Type: String. + +`apache_solr__version` + +* The version to install. See https://archive.apache.org/dist/solr/solr/ for Solr 9+, https://archive.apache.org/dist/lucene/solr/ for Solr 8-. +* Type: String. Example: ```yaml @@ -43,24 +58,140 @@ apache_solr__version: '9.4.0' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_solr__data_dir` | String. 
[SOLR_DATA_HOME](https://solr.apache.org/guide/solr/latest/configuration-guide/index-location-format.html) | `'/var/solr/data'` | -| `apache_solr__group` | String. Group running the systemd service. | `'solr'` | -| `apache_solr__http_bind_address` | String. [SOLR_JETTY_HOST](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#security-considerations) | `'0.0.0.0'` | -| `apache_solr__http_bind_port` | Number. [SOLR_PORT](https://solr.apache.org/guide/solr/latest/deployment-guide/upgrading-a-solr-cluster.html#planning-your-upgrade) | `8983` | -| `apache_solr__install_dir` | String. Where to install Apache Solr to. | `'/opt'` | -| `apache_solr__log4j_props` | String. [LOG4J_PROPS](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#log-settings) | `'/var/solr/log4j2.xml'` | -| `apache_solr__log_level` | String. [SOLR_LOG_LEVEL](https://solr.apache.org/guide/solr/latest/deployment-guide/configuring-logging.html) | `'INFO'` | -| `apache_solr__logs_dir` | String. [SOLR_LOGS_DIR](https://solr.apache.org/guide/solr/latest/deployment-guide/configuring-logging.html#permanent-logging-settings) | `'/var/log/solr'` | -| `apache_solr__pid_dir` | String. [SOLR_PID_DIR](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#environment-overrides-include-file) | `'/var/solr'` | -| `apache_solr__roles__group_var`/
`apache_solr__roles__host_var` | List of dictionaries. Roles bridge the gap between users and permissions. The roles can be used with any of the authentication plugins or with a custom authentication plugin if you have created one. You will only need to ensure that logged-in users are mapped to the roles defined by the plugin. The role-to-user mappings must be defined explicitly for every possible authenticated user.
Subkeys:
  • `name`: Mandatory, string. Name for the role.
  • `permissions`: Mandatory, list of strings. Apache Solr permissions assigned to this role. Have a look at the example for all possible values.
  • `state`: Optional, string. Either `present` or `absent`.
| `[]` | -| `apache_solr__service_enabled` | Bool. Enables or disables the service, analogous to `systemctl enable/disable --now`. | `true` | -| `apache_solr__service` | String. Name of the systemd service. | `'solr'` | -| `apache_solr__stop_wait` | Number. Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT". | `15` | -| `apache_solr__user` | String. Username running the systemd service. | `'solr'` | -| `apache_solr__users__group_var`/
`apache_solr__users__host_var` | List of dictionaries. This Ansible role supports Basic authentication for users with the use of the `BasicAuthPlugin`, which only provides user authentication. To control user permissions, you may need to configure `apache_solr__roles__group_var` / `apache_solr__roles__host_var`.
Note: The 'all' permission should always be the last permission in your config so that more specific permissions are applied first.
Subkeys:
  • `username`: Mandatory, string. Username.
  • `password`: Mandatory, string. Password.
  • `role`: Mandatory, string. Name of the role the user belongs to.
  • `state`: Optional, string. Either `present` or `absent`.
| `[]` | -| `apache_solr__var_dir` | String. The absolute path to the Solr home directory for each Solr node. | `'/var/solr'` | +`apache_solr__data_dir` + +* [SOLR_DATA_HOME](https://solr.apache.org/guide/solr/latest/configuration-guide/index-location-format.html). +* Type: String. +* Default: `'/var/solr/data'` + +`apache_solr__group` + +* Group running the systemd service. +* Type: String. +* Default: `'solr'` + +`apache_solr__http_bind_address` + +* [SOLR_JETTY_HOST](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#security-considerations). +* Type: String. +* Default: `'0.0.0.0'` + +`apache_solr__http_bind_port` + +* [SOLR_PORT](https://solr.apache.org/guide/solr/latest/deployment-guide/upgrading-a-solr-cluster.html#planning-your-upgrade). +* Type: Number. +* Default: `8983` + +`apache_solr__install_dir` + +* Where to install Apache Solr to. +* Type: String. +* Default: `'/opt'` + +`apache_solr__log4j_props` + +* [LOG4J_PROPS](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#log-settings). +* Type: String. +* Default: `'/var/solr/log4j2.xml'` + +`apache_solr__log_level` + +* [SOLR_LOG_LEVEL](https://solr.apache.org/guide/solr/latest/deployment-guide/configuring-logging.html). +* Type: String. +* Default: `'INFO'` + +`apache_solr__logs_dir` + +* [SOLR_LOGS_DIR](https://solr.apache.org/guide/solr/latest/deployment-guide/configuring-logging.html#permanent-logging-settings). +* Type: String. +* Default: `'/var/log/solr'` + +`apache_solr__pid_dir` + +* [SOLR_PID_DIR](https://solr.apache.org/guide/solr/latest/deployment-guide/taking-solr-to-production.html#environment-overrides-include-file). +* Type: String. +* Default: `'/var/solr'` + +`apache_solr__roles__group_var` / `apache_solr__roles__host_var` + +* Roles bridge the gap between users and permissions. The roles can be used with any of the authentication plugins or with a custom authentication plugin if you have created one. 
You will only need to ensure that logged-in users are mapped to the roles defined by the plugin. The role-to-user mappings must be defined explicitly for every possible authenticated user. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name for the role. + * Type: String. + + * `permissions`: + + * Mandatory. Apache Solr permissions assigned to this role. Have a look at the example for all possible values. + * Type: List of strings. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + +`apache_solr__service` + +* Name of the systemd service. +* Type: String. +* Default: `'solr'` + +`apache_solr__service_enabled` + +* Enables or disables the service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`apache_solr__stop_wait` + +* Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT. +* Type: Number. +* Default: `15` + +`apache_solr__user` + +* Username running the systemd service. +* Type: String. +* Default: `'solr'` + +`apache_solr__users__group_var` / `apache_solr__users__host_var` + +* This Ansible role supports Basic authentication for users with the use of the `BasicAuthPlugin`, which only provides user authentication. To control user permissions, you may need to configure `apache_solr__roles__group_var` / `apache_solr__roles__host_var`. Note: The 'all' permission should always be the last permission in your config so that more specific permissions are applied first. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `username`: + + * Mandatory. Username. + * Type: String. + + * `password`: + + * Mandatory. Password. + * Type: String. + + * `role`: + + * Mandatory. Name of the role the user belongs to. + * Type: String. 
+ + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + +`apache_solr__var_dir` + +* The absolute path to the Solr home directory for each Solr node. +* Type: String. +* Default: `'/var/solr'` Example: ```yaml diff --git a/roles/apache_solr/tasks/main.yml b/roles/apache_solr/tasks/main.yml index 4018242cc..c8d8bc695 100644 --- a/roles/apache_solr/tasks/main.yml +++ b/roles/apache_solr/tasks/main.yml @@ -77,12 +77,14 @@ - name: 'Deploy {{ apache_solr__install_dir }}/solr/bin/solr.in.sh' ansible.builtin.template: + backup: true src: 'opt/solr/bin/solr.in.sh.j2' dest: '{{ apache_solr__install_dir }}/solr/bin/solr.in.sh' notify: 'apache_solr: restart solr.service' - name: 'Deploy {{ apache_solr__log4j_props }}' ansible.builtin.template: + backup: true src: 'var/solr/log4j.xml.j2' dest: '{{ apache_solr__log4j_props }}' notify: 'apache_solr: restart solr.service' @@ -138,6 +140,7 @@ - name: 'Deploy /etc/systemd/system/solr.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/solr.service.j2' dest: '/etc/systemd/system/solr.service' owner: 'root' @@ -185,6 +188,7 @@ - name: 'Deploy {{ apache_solr__var_dir }}/security.json' ansible.builtin.template: + backup: true src: 'var/solr/security.json.j2' dest: '{{ apache_solr__var_dir }}/security.json' notify: 'apache_solr: restart solr.service' diff --git a/roles/apache_tomcat/README.md b/roles/apache_tomcat/README.md index 6ca0fe7ae..a4e5a15cf 100644 --- a/roles/apache_tomcat/README.md +++ b/roles/apache_tomcat/README.md @@ -127,13 +127,30 @@ If you use the [Apache Tomcat Playbook](https://github.com/Linuxfabrik/lfops/blo ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `apache_tomcat` | Install tomcat and optional default web apps, configure Tomcat (`server.xml` and others), configure logrotating, configure access to optional web apps, create users and roles, and enable or disable the default Tomcat service. 
| Restarts tomcat.service | -| `apache_tomcat:configure` | Configure Tomcat (`server.xml` and others), configure logrotating. | Restarts tomcat.service | -| `apache_tomcat:webapps` | Configure access to optional web apps. | Restarts tomcat.service | -| `apache_tomcat:users` | Create users and roles. | Restarts tomcat.service | -| `apache_tomcat:state` | Enable/disable the default Tomcat service. | - | +`apache_tomcat` + +* Install tomcat and optional default web apps, configure Tomcat (`server.xml` and others), configure logrotating, configure access to optional web apps, create users and roles, and enable or disable the default Tomcat service. +* Triggers: tomcat.service restart. + +`apache_tomcat:configure` + +* Configure Tomcat (`server.xml` and others), configure logrotating. +* Triggers: tomcat.service restart. + +`apache_tomcat:webapps` + +* Configure access to optional web apps. +* Triggers: tomcat.service restart. + +`apache_tomcat:users` + +* Create users and roles. +* Triggers: tomcat.service restart. + +`apache_tomcat:state` + +* Enable/disable the default Tomcat service. +* Triggers: none. ## Mandatory Role Variables @@ -149,11 +166,41 @@ Note that for Tomcat 7 onwards, the roles required to use the manager applicatio The GUI is protected against CSRF, but the text and JMX interfaces are not. To maintain CSRF protection, users with the `manager-gui` role should not be given the `manager-script` or `manager-jmx` roles. -| Variable | Description | -| -------- | ----------- | -| `apache_tomcat__webapps_docs_context_xml_allow` | String. A regex that describes which IP addresses are allowed to access the documentation webapp. | -| `apache_tomcat__webapps_manager_context_xml_allow` | String. A regex that describes which IP addresses are allowed to access the [manager and host-manager](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html) webapps. | -| `apache_tomcat__users__host_var` /
`apache_tomcat__users__group_var` | List of dictionaries. Users allowed to access the Manager Web GUI. Subkeys:
  • `password`: Mandatory, string.
  • `roles`: Mandatory, list. Any of `admin`, `admin-gui`, `admin-script`, `manager`, `manager-gui`, `manager-script`, `manager-jmx`, `manager-status`
  • `state`: Optional, string. Either `present` or `absent`.
  • `username`: Mandatory, string.
| +`apache_tomcat__webapps_docs_context_xml_allow` + +* A regex that describes which IP addresses are allowed to access the documentation webapp. +* Type: String. + +`apache_tomcat__webapps_manager_context_xml_allow` + +* A regex that describes which IP addresses are allowed to access the [manager and host-manager](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html) webapps. +* Type: String. + +`apache_tomcat__users__host_var` / `apache_tomcat__users__group_var` + +* Users allowed to access the Manager Web GUI. +* Type: List of dictionaries. +* Subkeys: + + * `password`: + + * Mandatory. + * Type: String. + + * `roles`: + + * Mandatory. Any of `admin`, `admin-gui`, `admin-script`, `manager`, `manager-gui`, `manager-script`, `manager-jmx`, `manager-status`. + * Type: List. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + + * `username`: + + * Mandatory. + * Type: String. Example: ```yaml @@ -172,29 +219,136 @@ apache_tomcat__webapps_manager_context_xml_allow: '|192\.2\.0\.\d+|10\.80\.32\.\ ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apache_tomcat__webapps_manager_web_xml_max_file_size` | Number. [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). File size limit for WAR file uploads in bytes. Defaults to 50MB. | `52428800` -| `apache_tomcat__webapps_manager_web_xml_max_request_size` | Number. [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). Request limit in bytes. Defaults to 50MB. | `52428800` -| `apache_tomcat__context_xml_cache_max_size` | Number. The maximum size of the static resource cache in kilobytes. If not specified, the default value is `10240` (10 megabytes). This value may be changed while the web application is running (e.g. via JMX). If the cache is using more memory than the new limit the cache will attempt to reduce in size over time to meet the new limit. 
If necessary, cacheObjectMaxSize will be reduced to ensure that it is no larger than `cacheMaxSize/20`. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/resources.html) | `102400` | -| `apache_tomcat__env_xms` | Number. `CATALINA_OPTS=-Xms`. Specifies the initial heap size. | `'1024M'` | -| `apache_tomcat__env_xmx` | Number. `CATALINA_OPTS=-Xmx`. Specifies the maximum heap size. | `'1024M'` | -| `apache_tomcat__env_xx` | For specifiying various [JVM Options](http://www.oracle.com/technetwork/articles/java/vmoptions-jsp-140102.html) after `-XX:`:
* Boolean options are turned on with `-XX:+` and turned off with `-XX:-`.
* Numeric options are set with `-XX:=`. Numbers can include `'m'` or `'M'` for megabytes, `'k'` or `'K'` for kilobytes, and `'g'` or `'G'` for gigabytes (for example, `32k` is the same as `32768`).
* String options are set with `-XX:=`, are usually used to specify a file, a path, or a list of commands. | `'+UseParallelGC'` | -| `apache_tomcat__logrotate` | Number. Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). | `{{ logrotate__rotate \| d(14) }}` | -| `apache_tomcat__roles__host_var` /
`apache_tomcat__roles__group_var` | List of dictionaries. Tomcat roles to deploy. Subkeys:
  • `name`: Mandatory, string. Name of the role.
  • `state`: Optional, string. Either `present` or `absent`.

Built-in Tomcat manager roles are:
  • `manager-gui`: Allows access to the HTML GUI and the status pages.
  • `manager-script`: Allows access to the HTTP API and the status pages.
  • `manager-jmx`: Allows access to the JMX proxy and the status pages.
  • `manager-status`: Allows access to the status pages only.
| `['admin-gui', 'manager-gui']` | -| `apache_tomcat__server_xml_ajp_port` | Number. The TCP port number on which this Connector will create a server socket and await incoming connections. Your operating system will allow only one server application to listen to a particular port number on a particular IP address. If the special value of 0 (zero) is used, then Tomcat will select a free port at random to use for this connector. This is typically only useful in embedded and testing applications. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/ajp.html) | unset (not listening on AJP) | -| `apache_tomcat__server_xml_connector_compression` | String. The Connector may use HTTP/1.1 GZIP compression in an attempt to save server bandwidth. The acceptable values for the parameter is "off" (disable compression), "on" (allow compression, which causes text data to be compressed), "force" (forces compression in all cases), or a numerical integer value (which is equivalent to "on", but specifies the minimum amount of data before the output is compressed). If the content-length is not known and compression is set to "on" or more aggressive, the output will also be compressed. If not specified, this attribute is set to "off".
Note: There is a tradeoff between using compression (saving your bandwidth) and using the sendfile feature (saving your CPU cycles). If the connector supports the sendfile feature, e.g. the NIO connector, using sendfile will take precedence over compression. The symptoms will be that static files greater that 48 Kb will be sent uncompressed. You can turn off sendfile by setting useSendfile attribute of the connector, as documented below, or change the sendfile usage threshold in the configuration of the DefaultServlet in the default conf/web.xml or in the web.xml of your web application. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) | `'on'` | -| `apache_tomcat__server_xml_connector_connection_timeout` | The number of milliseconds this Connector will wait, after accepting a connection, for the request URI line to be presented. Use a value of `-1` to indicate no (i.e. infinite) timeout. The default value is `60000` (i.e. 60 seconds) but note that the standard `server.xml` that ships with Tomcat sets this to `20000` (i.e. 20 seconds). Unless `disableUploadTimeout` is set to `false`, this timeout will also be used when reading the request body (if any). This parameter is there specifically to fight one type of Denial-Of-Service attack, whereby some malicious client(s) create a TCP connection to the server (which has the effect of reserving some resources on the server for handling this connection), and then just sit there without sending any HTTP request on that connection. By making this delay shorter, you shorten the time during which the server resources are allocated, to serve a request that will never come. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) | `20000` | -| `apache_tomcat__server_xml_connector_max_threads` | Number. The maximum number of request processing threads to be created by this Connector, which therefore determines the maximum number of simultaneous requests that can be handled. 
If not specified, this attribute is set to `200`. If an executor is associated with this connector, this attribute is ignored as the connector will execute tasks using the executor rather than an internal thread pool. Note that if an executor is configured any value set for this attribute will be recorded correctly but it will be reported (e.g. via JMX) as `-1` to make clear that it is not used. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) | `200` | -| `apache_tomcat__server_xml_connector_min_spare_threads` | Number. The minimum number of threads always kept running. This includes both active and idle threads. If an executor is associated with this connector, this attribute is ignored as the connector will execute tasks using the executor rather than an internal thread pool. Note that if an executor is configured any value set for this attribute will be recorded correctly but it will be reported (e.g. via JMX) as `-1` to make clear that it is not used. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) | `10` | -| `apache_tomcat__server_xml_connector_port` | The TCP port number on which this Connector will create a server socket and await incoming connections. Your operating system will allow only one server application to listen to a particular port number on a particular IP address. If the special value of `0` (zero) is used, then Tomcat will select a free port at random to use for this connector. This is typically only useful in embedded and testing applications. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) | `8080` | -| `apache_tomcat__server_xml_shutdown_port` | | `8005` | -| `apache_tomcat__service_enabled` | Bool. Enables or disables the service, analogous to `systemctl enable/disable --now`. | `true` | -| `apache_tomcat__service_state` | String. Changes the state of the service, analogous to `systemctl start/stop/restart/reload`. Possible options:
* `reloaded`
* `restarted`
* `started`
* `stopped` | `'started'` | -| `apache_tomcat__skip_admin_webapps` | Bool. If set to `true`, installation of the Manager Web GUIs will be skipped. | `false` | -| `apache_tomcat__skip_root_webapp` | Bool. If set to `true`, installation of the ROOT webapp (the tomcat startpage) will be skipped. | `false` | -| `apache_tomcat__webapps_manager_web_xml_max_file_size` | Number. [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). File size limit for WAR file uploads in bytes. Defaults to 50MB. | `52428800` -| `apache_tomcat__webapps_manager_web_xml_max_request_size` | Number. [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). Request limit in bytes. Defaults to 50MB. | `52428800` +`apache_tomcat__context_xml_cache_max_size` + +* The maximum size of the static resource cache in kilobytes. If not specified, the default value is `10240` (10 megabytes). This value may be changed while the web application is running (e.g. via JMX). If the cache is using more memory than the new limit the cache will attempt to reduce in size over time to meet the new limit. If necessary, cacheObjectMaxSize will be reduced to ensure that it is no larger than `cacheMaxSize/20`. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/resources.html) +* Type: Number. +* Default: `102400` + +`apache_tomcat__env_xms` + +* `CATALINA_OPTS=-Xms`. Specifies the initial heap size. +* Type: String. +* Default: `'1024M'` + +`apache_tomcat__env_xmx` + +* `CATALINA_OPTS=-Xmx`. Specifies the maximum heap size. +* Type: String. +* Default: `'1024M'` + +`apache_tomcat__env_xx` + +* For specifying various [JVM Options](http://www.oracle.com/technetwork/articles/java/vmoptions-jsp-140102.html) after `-XX:`: Boolean options are turned on with `-XX:+` and turned off with `-XX:-`. Numeric options are set with `-XX:=`. 
Numbers can include `'m'` or `'M'` for megabytes, `'k'` or `'K'` for kilobytes, and `'g'` or `'G'` for gigabytes (for example, `32k` is the same as `32768`). String options are set with `-XX:=`, are usually used to specify a file, a path, or a list of commands. +* Type: String. +* Default: `'+UseParallelGC'` + +`apache_tomcat__logrotate` + +* Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). +* Type: Number. +* Default: `{{ logrotate__rotate | d(14) }}` + +`apache_tomcat__roles__host_var` / `apache_tomcat__roles__group_var` + +* Tomcat roles to deploy. Built-in Tomcat manager roles are: `manager-gui` (allows access to the HTML GUI and the status pages), `manager-script` (allows access to the HTTP API and the status pages), `manager-jmx` (allows access to the JMX proxy and the status pages), `manager-status` (allows access to the status pages only). +* Type: List of dictionaries. +* Default: `[{'name': 'admin-gui'}, {'name': 'manager-gui'}]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the role. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + +`apache_tomcat__server_xml_ajp_port` + +* The TCP port number on which this Connector will create a server socket and await incoming connections. Your operating system will allow only one server application to listen to a particular port number on a particular IP address. If the special value of 0 (zero) is used, then Tomcat will select a free port at random to use for this connector. This is typically only useful in embedded and testing applications. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/ajp.html) +* Type: Number. 
+* Default: unset (not listening on AJP) + +`apache_tomcat__server_xml_connector_compressable_mime_types` + +* The compressable MIME types for the HTTP connector. +* Type: String. +* Default: `'text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml'` + +`apache_tomcat__server_xml_connector_compression` + +* The Connector may use HTTP/1.1 GZIP compression in an attempt to save server bandwidth. The acceptable values for the parameter are "off" (disable compression), "on" (allow compression, which causes text data to be compressed), "force" (forces compression in all cases), or a numerical integer value (which is equivalent to "on", but specifies the minimum amount of data before the output is compressed). If the content-length is not known and compression is set to "on" or more aggressive, the output will also be compressed. If not specified, this attribute is set to "off". Note: There is a tradeoff between using compression (saving your bandwidth) and using the sendfile feature (saving your CPU cycles). If the connector supports the sendfile feature, e.g. the NIO connector, using sendfile will take precedence over compression. The symptoms will be that static files greater than 48 Kb will be sent uncompressed. You can turn off sendfile by setting useSendfile attribute of the connector, as documented below, or change the sendfile usage threshold in the configuration of the DefaultServlet in the default conf/web.xml or in the web.xml of your web application. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) +* Type: String. +* Default: `'on'` + +`apache_tomcat__server_xml_connector_connection_timeout` + +* The number of milliseconds this Connector will wait, after accepting a connection, for the request URI line to be presented. Use a value of `-1` to indicate no (i.e. infinite) timeout. The default value is `60000` (i.e. 
60 seconds) but note that the standard `server.xml` that ships with Tomcat sets this to `20000` (i.e. 20 seconds). Unless `disableUploadTimeout` is set to `false`, this timeout will also be used when reading the request body (if any). This parameter is there specifically to fight one type of Denial-Of-Service attack, whereby some malicious client(s) create a TCP connection to the server (which has the effect of reserving some resources on the server for handling this connection), and then just sit there without sending any HTTP request on that connection. By making this delay shorter, you shorten the time during which the server resources are allocated, to serve a request that will never come. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) +* Type: Number. +* Default: `20000` + +`apache_tomcat__server_xml_connector_max_threads` + +* The maximum number of request processing threads to be created by this Connector, which therefore determines the maximum number of simultaneous requests that can be handled. If not specified, this attribute is set to `200`. If an executor is associated with this connector, this attribute is ignored as the connector will execute tasks using the executor rather than an internal thread pool. Note that if an executor is configured any value set for this attribute will be recorded correctly but it will be reported (e.g. via JMX) as `-1` to make clear that it is not used. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) +* Type: Number. +* Default: `200` + +`apache_tomcat__server_xml_connector_min_spare_threads` + +* The minimum number of threads always kept running. This includes both active and idle threads. If an executor is associated with this connector, this attribute is ignored as the connector will execute tasks using the executor rather than an internal thread pool. Note that if an executor is configured any value set for this attribute will be recorded correctly but it will be reported (e.g. 
via JMX) as `-1` to make clear that it is not used. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) +* Type: Number. +* Default: `10` + +`apache_tomcat__server_xml_connector_port` + +* The TCP port number on which this Connector will create a server socket and await incoming connections. Your operating system will allow only one server application to listen to a particular port number on a particular IP address. If the special value of `0` (zero) is used, then Tomcat will select a free port at random to use for this connector. This is typically only useful in embedded and testing applications. [Doc](https://tomcat.apache.org/tomcat-9.0-doc/config/http.html) +* Type: Number. +* Default: `8080` + +`apache_tomcat__server_xml_shutdown_port` + +* The TCP port number on which this server waits for a shutdown command. +* Type: Number. +* Default: `8005` + +`apache_tomcat__service_enabled` + +* Enables or disables the service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`apache_tomcat__service_state` + +* Changes the state of the service, analogous to `systemctl start/stop/restart/reload`. Possible options: `reloaded`, `restarted`, `started`, `stopped`. +* Type: String. +* Default: `'started'` + +`apache_tomcat__skip_admin_webapps` + +* If set to `true`, installation of the Manager Web GUIs will be skipped. +* Type: Bool. +* Default: `false` + +`apache_tomcat__skip_root_webapp` + +* If set to `true`, installation of the ROOT webapp (the tomcat startpage) will be skipped. +* Type: Bool. +* Default: `false` + +`apache_tomcat__webapps_manager_web_xml_max_file_size` + +* [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). File size limit for WAR file uploads in bytes. Defaults to 50MB. +* Type: Number. +* Default: `52428800` + +`apache_tomcat__webapps_manager_web_xml_max_request_size` + +* [Manager App](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html). Request limit in bytes. 
Defaults to 50MB. +* Type: Number. +* Default: `52428800` Example: ```yaml @@ -233,6 +387,7 @@ WARN org.hibernate.engine.jdbc.internal.JdbcServicesImpl:169 - HHH000342: Could The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server. Caused by: org.hibernate.HibernateException: Connection cannot be null when 'hibernate.dialect' not set ``` + * `chown -R root:tomcat /var/lib/tomcat/webapps/`? * Database Credentials correct? * Connection string correct? Example: `jdbc:mysql://localhost/linuxfabrik?createDatabaseIfNotExist=true&useEncoding=true&characterEncoding=UTF-8` diff --git a/roles/apache_tomcat/tasks/main.yml b/roles/apache_tomcat/tasks/main.yml index 021299dc9..bd733209c 100644 --- a/roles/apache_tomcat/tasks/main.yml +++ b/roles/apache_tomcat/tasks/main.yml @@ -52,6 +52,7 @@ - name: 'Deploy /etc/tomcat/server.xml' ansible.builtin.template: + backup: true src: 'etc/tomcat/{{ tomcat__installed_version }}-server.xml.j2' dest: '/etc/tomcat/server.xml' owner: 'root' @@ -61,6 +62,7 @@ - name: 'Copy tomcat config /etc/sysconfig' ansible.builtin.template: + backup: true src: 'etc/sysconfig/{{ tomcat__installed_version }}-tomcat.j2' dest: '/etc/sysconfig/tomcat' owner: 'root' @@ -69,6 +71,7 @@ - name: 'Deploy /etc/tomcat/context.xml' ansible.builtin.template: + backup: true src: 'etc/tomcat/{{ tomcat__installed_version }}-context.xml.j2' dest: '/etc/tomcat/context.xml' owner: 'root' @@ -78,6 +81,7 @@ - name: 'Deploy /etc/tomcat/logging.properties' ansible.builtin.template: + backup: true src: 'etc/tomcat/{{ tomcat__installed_version }}-logging.properties.j2' dest: '/etc/tomcat/logging.properties' owner: 'root' @@ -87,6 +91,7 @@ - name: 'Copy tomcat logrotate template to /etc/logrotate.d' ansible.builtin.template: + backup: true src: 'etc/logrotate.d/tomcat.j2' dest: '/etc/logrotate.d/tomcat' owner: 'root' @@ -104,6 +109,7 @@ - name: 'Deploy /var/lib/tomcat/webapps/docs/META-INF/context.xml' 
ansible.builtin.template: + backup: true src: 'var/lib/tomcat/webapps/docs/META-INF/context.xml.j2' dest: '/var/lib/tomcat/webapps/docs/META-INF/context.xml' owner: 'root' @@ -115,6 +121,7 @@ - name: 'Deploy /var/lib/tomcat/webapps/host-manager/META-INF/context.xml' ansible.builtin.template: + backup: true src: 'var/lib/tomcat/webapps/host-manager/META-INF/context.xml.j2' dest: '/var/lib/tomcat/webapps/host-manager/META-INF/context.xml' owner: 'root' @@ -126,6 +133,7 @@ - name: 'Deploy /var/lib/tomcat/webapps/manager/META-INF/context.xml' ansible.builtin.template: + backup: true src: 'var/lib/tomcat/webapps/manager/META-INF/context.xml.j2' dest: '/var/lib/tomcat/webapps/manager/META-INF/context.xml' owner: 'root' @@ -137,6 +145,7 @@ - name: 'Deploy /var/lib/tomcat/webapps/manager/WEB-INF/web.xml' ansible.builtin.template: + backup: true src: 'var/lib/tomcat/webapps/manager/WEB-INF/web.xml.j2' dest: '/var/lib/tomcat/webapps/manager/WEB-INF/web.xml' owner: 'root' @@ -159,6 +168,7 @@ - name: 'Deploy /etc/tomcat/tomcat-users.xml' ansible.builtin.template: + backup: true src: 'etc/tomcat/{{ tomcat__installed_version }}-tomcat-users.xml.j2' dest: '/etc/tomcat/tomcat-users.xml' owner: 'root' diff --git a/roles/apps/README.md b/roles/apps/README.md index 0956e5a83..da153a3ac 100644 --- a/roles/apps/README.md +++ b/roles/apps/README.md @@ -5,16 +5,33 @@ This role manages a list of applications using the OS's package manager. ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `apps` |
  • Remove apps using the package manager
  • Deploy apps using the package manager
| - | +`apps` + +* Remove apps using the package manager. +* Deploy apps using the package manager. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `apps__apps__host_var` /
`apps__apps__group_var` | List of apps to remove or to deploy. Subkeys:
  • `name`: Mandatory, string. Name of the application package.
  • `state`: Optional, string. Possible options: `present` (default), `absent`. You can use other states like `latest` ONLY if they are supported by the underlying package module(s) executed.
| `[]` | +`apps__apps__host_var` / `apps__apps__group_var` + +* List of apps to remove or to deploy. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `name`: + + * Mandatory. Name of the application package. + * Type: String. + + * `state`: + + * Optional. Possible options: `present` (default), `absent`. You can use other states like `latest` ONLY if they are supported by the underlying package module(s) executed. + * Type: String. + * Default: `'present'` Example: ```yaml diff --git a/roles/at/README.md b/roles/at/README.md index a17149c6e..a6b5b3769 100644 --- a/roles/at/README.md +++ b/roles/at/README.md @@ -5,17 +5,24 @@ This role installs at, a daemon that allows commands to be run at a specified ti ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `at` | Installs and configures at/atd | - | -| `at:state` | Controls the state of the atd service | - | +`at` + +* Installs and configures at/atd. +* Triggers: none. + +`at:state` + +* Controls the state of the atd service. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `at__service_enabled` | Enables or disables the atd service, analogous to `systemctl enable/disable --now`. | `true` | +`at__service_enabled` + +* Enables or disables the atd service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` Example: ```yaml diff --git a/roles/audit/README.md b/roles/audit/README.md index 8db8f5139..7d335c828 100644 --- a/roles/audit/README.md +++ b/roles/audit/README.md @@ -5,23 +5,60 @@ This role installs and configures [audit](http://people.redhat.com/sgrubb/audit/ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `audit` | Installs and configures audit | - | -| `audit:state` | Starts, stops or restarts the audit daemon | - | +`audit` + +* Installs and configures audit. +* Triggers: none. 
+ +`audit:state` + +* Starts, stops or restarts the audit daemon. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `audit__action_mail_account` | This option should contain a valid email address or alias. The default address is root. If the email address is not local to the machine, you must make sure you have email properly configured on your machine and network. Also, this option requires that /usr/lib/sendmail exists on the machine. | `'root'` | -| `audit__admin_space_left` | This is a numeric value in megabytes that tells the audit daemon when to perform a configurable action because the system is running low on disk space. This should be considered the last chance to do something before running out of disk space. The nu‐ meric value for this parameter should be lower than the number for space_left. You may also append a percent sign (e.g. 1%) to the number to have the audit daemon cal‐ culate the number based on the disk partition size. | `'10%'` | -| `audit__admin_space_left_action` | This parameter tells the system what action to take when the system has detected that it is low on disk space. Valid values are ignore, syslog, rotate, email, exec, sus‐ pend, single, and halt. If set to ignore, the audit daemon does nothing. Syslog means that it will issue a warning to syslog. rotate will rotate logs, losing the oldest to free up space. Email means that it will send a warning to the email ac‐ count specified in action_mail_acct as well as sending the message to syslog. exec /path-to-script will execute the script. You cannot pass parameters to the script. The script is also responsible for telling the auditd daemon to resume logging once its completed its action. This can be done by adding service auditd resume to the script. Suspend will cause the audit daemon to stop writing records to the disk. The daemon will still be alive. 
The single option will cause the audit daemon to put the computer system in single user mode. The halt option will cause the audit daemon to shutdown the computer system. Except for rotate, it will perform this action just one time. | `'EMAIL'` | -| `audit__num_logs` | This keyword specifies the number of log files to keep if rotate is given as the max_log_file_action. If the number is < 2, logs are not rotated. This number must be 999 or less. The default is 0 - which means no rotation. As you increase the number of log files being rotated, you may need to adjust the kernel backlog setting upwards since it takes more time to rotate the files. This is typically done in /etc/au‐ dit/audit.rules. If log rotation is configured to occur, the daemon will check for excess logs and remove them in effort to keep disk space available. The excess log check is only done on startup and when a reconfigure results in a space check. | `10` | -| `audit__space_left` | If the free space in the filesystem containing log_file drops below this value, the audit daemon takes the action specified by space_left_action. If the value of space_left is specified as a whole number, it is interpreted as an absolute size in megabytes (MiB). If the value is specified as a number between 1 and 99 followed by a percentage sign (e.g., 5%), the audit daemon calculates the absolute size in megabytes based on the size of the filesystem containing log_file. (E.g., if the filesystem containing log_file is 2 gigabytes in size, and space_left is set to 25%, then the audit daemon sets space_left to approximately 500 megabytes. Note that this calculation is performed when the audit daemon starts, so if you resize the filesys‐ tem containing log_file while the audit daemon is running, you should send the audit daemon SIGHUP to re-read the configuration file and recalculate the correct percent‐ age. 
| `'20%'` | -| `audit__space_left_action` | This parameter tells the system what action to take when the system has detected that it is starting to get low on disk space. Valid values are ignore, syslog, rotate, email, exec, suspend, single, and halt. If set to ignore, the audit daemon does nothing. syslog means that it will issue a warning to syslog. rotate will rotate logs, losing the oldest to free up space. Email means that it will send a warning to the email account specified in action_mail_acct as well as sending the message to syslog. exec /path-to-script will execute the script. You cannot pass parameters to the script. The script is also responsible for telling the auditd daemon to resume logging once its completed its action. This can be done by adding service auditd re‐ sume to the script. suspend will cause the audit daemon to stop writing records to the disk. The daemon will still be alive. The single option will cause the audit dae‐ mon to put the computer system in single user mode. The halt option will cause the audit daemon to shutdown the computer system. Except for rotate, it will perform this action just one time. | `'ROTATE'` | -| `audit__service_enabled` | Enables or disables the auditd service, analogous to `systemctl enable/disable --now`. | `true` | +`audit__action_mail_account` + +* This option should contain a valid email address or alias. The default address is root. If the email address is not local to the machine, you must make sure you have email properly configured on your machine and network. Also, this option requires that /usr/lib/sendmail exists on the machine. +* Type: String. +* Default: `'root'` + +`audit__admin_space_left` + +* This is a numeric value in megabytes that tells the audit daemon when to perform a configurable action because the system is running low on disk space. This should be considered the last chance to do something before running out of disk space. 
The numeric value for this parameter should be lower than the number for space_left. You may also append a percent sign (e.g. 1%) to the number to have the audit daemon calculate the number based on the disk partition size. +* Type: String. +* Default: `'10%'` + +`audit__admin_space_left_action` + +* This parameter tells the system what action to take when the system has detected that it is low on disk space. Valid values are ignore, syslog, rotate, email, exec, suspend, single, and halt. If set to ignore, the audit daemon does nothing. Syslog means that it will issue a warning to syslog. rotate will rotate logs, losing the oldest to free up space. Email means that it will send a warning to the email account specified in action_mail_acct as well as sending the message to syslog. exec /path-to-script will execute the script. You cannot pass parameters to the script. The script is also responsible for telling the auditd daemon to resume logging once it has completed its action. This can be done by adding service auditd resume to the script. Suspend will cause the audit daemon to stop writing records to the disk. The daemon will still be alive. The single option will cause the audit daemon to put the computer system in single user mode. The halt option will cause the audit daemon to shut down the computer system. Except for rotate, it will perform this action just one time. +* Type: String. +* Default: `'EMAIL'` + +`audit__num_logs` + +* This keyword specifies the number of log files to keep if rotate is given as the max_log_file_action. If the number is < 2, logs are not rotated. This number must be 999 or less. The default is 0 - which means no rotation. As you increase the number of log files being rotated, you may need to adjust the kernel backlog setting upwards since it takes more time to rotate the files. This is typically done in /etc/audit/audit.rules. 
If log rotation is configured to occur, the daemon will check for excess logs and remove them in an effort to keep disk space available. The excess log check is only done on startup and when a reconfigure results in a space check. +* Type: Number. +* Default: `10` + +`audit__space_left` + +* If the free space in the filesystem containing log_file drops below this value, the audit daemon takes the action specified by space_left_action. If the value of space_left is specified as a whole number, it is interpreted as an absolute size in megabytes (MiB). If the value is specified as a number between 1 and 99 followed by a percentage sign (e.g., 5%), the audit daemon calculates the absolute size in megabytes based on the size of the filesystem containing log_file. (E.g., if the filesystem containing log_file is 2 gigabytes in size, and space_left is set to 25%, then the audit daemon sets space_left to approximately 500 megabytes. Note that this calculation is performed when the audit daemon starts, so if you resize the filesystem containing log_file while the audit daemon is running, you should send the audit daemon SIGHUP to re-read the configuration file and recalculate the correct percentage. +* Type: String. +* Default: `'20%'` + +`audit__space_left_action` + +* This parameter tells the system what action to take when the system has detected that it is starting to get low on disk space. Valid values are ignore, syslog, rotate, email, exec, suspend, single, and halt. If set to ignore, the audit daemon does nothing. syslog means that it will issue a warning to syslog. rotate will rotate logs, losing the oldest to free up space. Email means that it will send a warning to the email account specified in action_mail_acct as well as sending the message to syslog. exec /path-to-script will execute the script. You cannot pass parameters to the script. The script is also responsible for telling the auditd daemon to resume logging once it has completed its action. 
This can be done by adding service auditd resume to the script. suspend will cause the audit daemon to stop writing records to the disk. The daemon will still be alive. The single option will cause the audit daemon to put the computer system in single user mode. The halt option will cause the audit daemon to shut down the computer system. Except for rotate, it will perform this action just one time. +* Type: String. +* Default: `'ROTATE'` + +`audit__service_enabled` + +* Enables or disables the auditd service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` Example: ```yaml diff --git a/roles/audit/tasks/main.yml b/roles/audit/tasks/main.yml index b8d33f9a1..d6c1cac43 100644 --- a/roles/audit/tasks/main.yml +++ b/roles/audit/tasks/main.yml @@ -9,6 +9,7 @@ - name: 'Deploy /etc/audit/auditd.conf' ansible.builtin.template: + backup: true src: 'etc/audit/auditd.conf.j2' dest: '/etc/audit/auditd.conf' owner: 'root' diff --git a/roles/bind/README.md b/roles/bind/README.md index bd77d81f5..1b7ecc3bd 100644 --- a/roles/bind/README.md +++ b/roles/bind/README.md @@ -4,19 +4,74 @@ This role installs and configures [bind](https://www.isc.org/bind/) as a DNS ser ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `bind` | Installs and configures bind | Restarts named.service | -| `bind:configure` | Manages the main named config and the zones | Reloads named.service | -| `bind:state` | Manages the state of the named systemd service | - | +`bind` + +* Installs and configures bind. +* Triggers: named.service restart. + +`bind:configure` + +* Manages the main named config and the zones. +* Triggers: named.service reload. + +`bind:state` + +* Manages the state of the named systemd service. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `bind__trusted_networks` | List of networks from which DNS queries are allowed. 
Results in the `trusted` ACL in the config. | -| `bind__zones` | List of dictionaries defining the zone files with the DNS records. Subkeys:
  • `name`: Mandatory, string. The name of the zone. Suffix with `in-addr.arpa` (IPv4) / `ip6.arpa` (IPv6) for reverse zones.
  • `file`: Optional, string. The filename for the zone file under `/var/named/`. Defaults to `name` with `.zone` suffix.
  • `type`: Optional, string. [type](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-type) of the zone. Defaults to `master`.
  • `forwarders`: Optional, list of strings. [forwarders](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-forwarders) of the zone. Defaults to `[]`, as this is generally not useful for `type: 'master'`.
  • `allow_transfer`: Optional, list of strings. [allow-transfer](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-allow-transfer) of the zone to a secondary. Defaults to `[]`.
  • `masters`: Optional, list of strings. [masters](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-primaries) of from which to fetch the zone. Defaults to `[]`.
  • `raw`: Optional, multiline string. The raw content of the zone file.
| +`bind__trusted_networks` + +* List of networks from which DNS queries are allowed. Results in the `trusted` ACL in the config. +* Type: List of strings. + +`bind__zones` + +* List of dictionaries defining the zone files with the DNS records. +* Type: List of dictionaries. + +* Subkeys: + + * `name`: + + * Mandatory. The name of the zone. Suffix with `in-addr.arpa` (IPv4) / `ip6.arpa` (IPv6) for reverse zones. + * Type: String. + + * `file`: + + * Optional. The filename for the zone file under `/var/named/`. Defaults to `name` with `.zone` suffix. + * Type: String. + + * `type`: + + * Optional. [type](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-type) of the zone. + * Type: String. + * Default: `'master'` + + * `forwarders`: + + * Optional. [forwarders](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-forwarders) of the zone. + * Type: List of strings. + * Default: `[]` + + * `allow_transfer`: + + * Optional. [allow-transfer](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-allow-transfer) of the zone to a secondary. + * Type: List of strings. + * Default: `[]` + + * `masters`: + + * Optional. [masters](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-primaries) from which to fetch the zone. + * Type: List of strings. + * Default: `[]` + + * `raw`: + + * Optional. The raw content of the zone file. + * Type: Multiline string. Example: ```yaml @@ -60,20 +115,94 @@ bind__zones: ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `bind__allow_new_zones` | Boolean. If `true`, then zones can be added at runtime via `rndc addzone`. | `false` | -| `bind__allow_query_cache` | List of ACLs (use `'trusted'` for the `bind__trusted_networks`) or [Address Match Lists](https://bind9.readthedocs.io/en/latest/reference.html#address-match-lists) which are allowed to query the cache. 
This effectively controls who can use recursion. When setting `bind__recursion: false`, it makes sense to set this to `'none'` to prevent any answer. | `['trusted']` -| `bind__allow_recursion` | List of ACLs (use `'trusted'` for the `bind__trusted_networks`) or [Address Match Lists](https://bind9.readthedocs.io/en/latest/reference.html#address-match-lists) which are allowed to initiate recursive queries. When setting `bind__recursion: false`, it makes sense to set this to `'none'` to prevent any answer. | `['trusted']` -| `bind__allow_transfer` | List of strings. The global [`allow-transfer`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-allow-transfer) option. Can be overwritten per zone. | `['none']` | -| `bind__forwarders` | List of DNS servers to which DNS queries to unknown domain names should be forwarded. | `['1.0.0.1', '1.1.1.1']` | -| `bind__keys` | List of dictionaries. [`key`s](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-key) for use with TSIG or the command channel (`rndc`). Subkeys:
  • `name`: Mandatory, string. Name of the key.
  • `algorithm`: Mandatory, string. [`algorithm`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-algorithm) of the key.
  • `secret`: Mandatory, string. The key's [`secret`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-secret). Will be base64 encoded by the role.
| `[]` | -| `bind__listen_ipv6` | Boolean. Enables or disables listening on IPv6. | `false` | -| `bind__listen_on_addresses` | List of addresses on which the server will listen. This indirectly sets the listening interface(s). | `['any']` | -| `bind__named_conf_raw` | Multiline string. Raw content which will be appended to the end of `/etc/named.conf` | unset | -| `bind__named_service_enabled` | Boolean. Enables or disables the named service, analogous to `systemctl enable/disable --now`. Possible options: | `true` | -| `bind__recursion` | Boolean. Defines whether recursion and caching are allowed. Disabling recursion is recommended for authorative name servers. | `true` | -| `bind__rpz_zone` | String. Name of the RPZ zone. Setting this enables the usage of a reverse-policy zone (have a look at https://dnsrpz.info/, basically acts as a `/etc/hosts` file for all clients). To use this, also create a zone with `name: '{{ bind__rpz_zone }}'` in `bind__zones`. | unset | +`bind__allow_new_zones` + +* If `true`, then zones can be added at runtime via `rndc addzone`. +* Type: Bool. +* Default: `false` + +`bind__allow_query_cache` + +* List of ACLs (use `'trusted'` for the `bind__trusted_networks`) or [Address Match Lists](https://bind9.readthedocs.io/en/latest/reference.html#address-match-lists) which are allowed to query the cache. This effectively controls who can use recursion. When setting `bind__recursion: false`, it makes sense to set this to `'none'` to prevent any answer. +* Type: List of strings. +* Default: `['trusted']` + +`bind__allow_recursion` + +* List of ACLs (use `'trusted'` for the `bind__trusted_networks`) or [Address Match Lists](https://bind9.readthedocs.io/en/latest/reference.html#address-match-lists) which are allowed to initiate recursive queries. When setting `bind__recursion: false`, it makes sense to set this to `'none'` to prevent any answer. +* Type: List of strings. 
+* Default: `['trusted']` + +`bind__allow_transfer` + +* The global [`allow-transfer`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-allow-transfer) option. Can be overwritten per zone. +* Type: List of strings. +* Default: `['none']` + +`bind__forwarders` + +* List of DNS servers to which DNS queries to unknown domain names should be forwarded. +* Type: List of strings. +* Default: `['1.0.0.1', '1.1.1.1']` + +`bind__keys` + +* [`key`s](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-key) for use with TSIG or the command channel (`rndc`). +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `name`: + + * Mandatory. Name of the key. + * Type: String. + + * `algorithm`: + + * Mandatory. [`algorithm`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-algorithm) of the key. + * Type: String. + + * `secret`: + + * Mandatory. The key's [`secret`](https://bind9.readthedocs.io/en/latest/reference.html#namedconf-statement-secret). Will be base64 encoded by the role. + * Type: String. + +`bind__listen_ipv6` + +* Enables or disables listening on IPv6. +* Type: Bool. +* Default: `false` + +`bind__listen_on_addresses` + +* List of addresses on which the server will listen. This indirectly sets the listening interface(s). +* Type: List of strings. +* Default: `['any']` + +`bind__named_conf_raw` + +* Raw content which will be appended to the end of `/etc/named.conf`. +* Type: Multiline string. +* Default: unset + +`bind__named_service_enabled` + +* Enables or disables the named service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`bind__recursion` + +* Defines whether recursion and caching are allowed. Disabling recursion is recommended for authoritative name servers. +* Type: Bool. +* Default: `true` + +`bind__rpz_zone` + +* Name of the RPZ zone. 
Setting this enables the usage of a response-policy zone (have a look at https://dnsrpz.info/, basically acts as a `/etc/hosts` file for all clients). To use this, also create a zone with `name: '{{ bind__rpz_zone }}'` in `bind__zones`. +* Type: String. +* Default: unset Example: ```yaml @@ -196,7 +325,6 @@ bind__zones: ``` - ## License [The Unlicense](https://unlicense.org/) diff --git a/roles/bind/tasks/main.yml b/roles/bind/tasks/main.yml index 61175f4d8..55acc0fc5 100644 --- a/roles/bind/tasks/main.yml +++ b/roles/bind/tasks/main.yml @@ -15,6 +15,7 @@ - name: 'Deploy /etc/sysconfig/named' ansible.builtin.template: + backup: true src: 'etc/sysconfig/named.j2' dest: '/etc/sysconfig/named' owner: 'root' @@ -24,6 +25,7 @@ - name: 'Deploy /etc/named.conf' ansible.builtin.template: + backup: true src: 'etc/named.conf.j2' dest: '/etc/named.conf' owner: 'root' @@ -40,6 +42,7 @@ - name: 'Deploy the forward and reverse zones' ansible.builtin.template: + backup: true src: 'var/named/raw.zone.j2' dest: '/var/named/{{ item["file"] | d(item["name"] ~ ".zone") }}' owner: 'root' diff --git a/roles/blocky/README.md b/roles/blocky/README.md index d99def13b..69c8b3100 100644 --- a/roles/blocky/README.md +++ b/roles/blocky/README.md @@ -2,25 +2,46 @@ This role installs and configures blocky, a DNS proxy and ad-blocker for the local network written in Go. -This Ansible role does not provide a way to template the blocky configuration file – it is simply far too dynamic. Also it does not currently support multiple blocky instances synchronizing their caches and locking state via redis. If you have multiple instances, just deploy the same config.yml to a group of blocky instances, and you will be fine. +This Ansible role does not provide a way to template the blocky configuration file -- it is simply far too dynamic. Also it does not currently support multiple blocky instances synchronizing their caches and locking state via redis. 
If you have multiple instances, just deploy the same config.yml to a group of blocky instances, and you will be fine. ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `blocky` | Downloads blocky from GitHub to the Ansible control node, copies it to the remote host, configures Systemd, applies a default configuration, and overrides it with a custom configuration (if available). | Restarts blocky.service | -| `blocky:configure` | Copies and applies a custom blocky configuration file. | Restarts blocky.service | -| `blocky:state` | Enable/disable the default blocky service. | - | +`blocky` + +* Downloads blocky from GitHub to the Ansible control node, copies it to the remote host, configures Systemd, applies a default configuration, and overrides it with a custom configuration (if available). +* Triggers: blocky.service restart. + +`blocky:configure` + +* Copies and applies a custom blocky configuration file. +* Triggers: blocky.service restart. + +`blocky:state` + +* Enable/disable the default blocky service. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `blocky__config_yml` | String. The config YAML for blocky to deploy. | unset | -| `blocky__service_enabled` | Bool. Enables or disables the service, analogous to `systemctl enable/disable --now`. | `true` | -| `blocky__version` | String. The version of blocky to install. Possible options: `'latest'`, or any from https://github.com/0xERR0R/blocky/releases. | `'latest'` | +`blocky__config_yml` + +* The config YAML for blocky to deploy. +* Type: String. +* Default: unset + +`blocky__service_enabled` + +* Enables or disables the service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`blocky__version` + +* The version of blocky to install. Possible options: `'latest'`, or any from https://github.com/0xERR0R/blocky/releases. +* Type: String. 
+* Default: `'latest'` Example: ```yaml diff --git a/roles/blocky/tasks/main.yml b/roles/blocky/tasks/main.yml index 3e52dff41..d8c840ae8 100644 --- a/roles/blocky/tasks/main.yml +++ b/roles/blocky/tasks/main.yml @@ -112,6 +112,7 @@ - name: 'Deploy /etc/systemd/system/blocky.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/blocky.service.j2' dest: '/etc/systemd/system/blocky.service' owner: 'root' diff --git a/roles/borg_local/README.md b/roles/borg_local/README.md index e401abb96..9f328f384 100644 --- a/roles/borg_local/README.md +++ b/roles/borg_local/README.md @@ -23,18 +23,28 @@ umount /mnt/borg ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `borg_local` | Installs and configures borg | - | -| `borg_local:configure` | Configures borg | - | -| `borg_local:state` | Manages the state of the borg timer | - | +`borg_local` + +* Installs and configures borg. +* Triggers: none. + +`borg_local:configure` + +* Configures borg. +* Triggers: none. + +`borg_local:state` + +* Manages the state of the borg timer. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `borg_local__passphrase` | Passphrase for the Borg repositories. | +`borg_local__passphrase` + +* Passphrase for the Borg repositories. +* Type: String. Example: ```yaml @@ -44,22 +54,148 @@ borg_local__passphrase: 'linuxfabrik' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `borg_local__backup_dir` | The directory where the backup repositories will be created. | `/backup` | -| `borg_local__backup_opts__host_vars` /
`borg_local__backup_opts__group_vars` | The list of options used by borg. Subkeys:
  • `option`: Mandatory, string. The option to be used.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
|
  • `'--stats'`
  • `'--progress'`
  • `'--one-file-system'`
  • `'--compression lz4'`
  • `'--checkpoint-interval 86400'`
| -| `borg_local__exclude_files__host_vars` /
`borg_local__exclude_files__group_vars` | The list of files or directories which should be excluded from the backup. Excludes act as filters within the included paths. Subkeys:
  • `file`: Mandatory, string. The file or directory to be excluded.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
|
  • `'/root/.cache'`
  • `'*.svn*'`
  • `'*.git*'`
  • `'*.tmp'`
  • `'*.temp'`
  • `'*/cache/*'`
  • `'*/log/*'`
| -| `borg_local__icinga2_api_url` | The URL of the Icinga2 API (usually on the Icinga2 Master). This will be used to set a downtime for the corresponding ClamAV service. | `'https://{{ icinga2_agent__icinga2_master_host \| d("") }}:{{ icinga2_agent__icinga2_master_port \| d(5665) }}'` | -| `borg_local__icinga2_api_user_login` | The Icinga2 API User to set the downtime for the corresponding ClamAV service. | unset | -| `borg_local__icinga2_hostname` | The hostname of the Icinga2 host on which the downtime should be set. | `'{{ ansible_facts["nodename"] }}'` | -| `borg_local__include_files__host_vars` /
`borg_local__include_files__group_vars` | The list of files or directories which should be included in the backup. Only the listed paths are backed up, everything else is implicitly excluded. Subkeys:
  • `file`: Mandatory, string. The file or directory to be included.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
|
  • `'/etc'`
  • `'/home'`
  • `'/opt'`
  • `'/root'`
  • `'/var/spool/cron'`
| -| `borg_local__on_calendar_daily_hour` | The hour of the daily backup | `23` | -| `borg_local__on_calendar_daily` | The time at which the daily backup will run. Once per day. | `'*-*-* {{ borg_local__on_calendar_daily_hour }}:{{ 45\|random(seed=inventory_hostname) }}'` | -| `borg_local__on_calendar_hourly` | The time at which the hourly backup will run. Once per hour. | `'*-*-* *:{{ 59 \|random(start=45) }}'` | -| `borg_local__retention_daily` | The number of daily backups to keep. | `'14d'` | -| `borg_local__retention_hourly` | The number of hourly backups to keep. | `'99H'` | -| `borg_local__timer_daily_enabled` | Whether the daily backup should be enabled. | `true` | -| `borg_local__timer_hourly_enabled` | Whether the hourly backup should be enabled. | `false` | +`borg_local__backup_dir` + +* The directory where the backup repositories will be created. +* Type: String. +* Default: `'/backup'` + +`borg_local__backup_opts__host_var` / `borg_local__backup_opts__group_var` + +* The list of options used by borg. +* Type: List of dictionaries. +* Default: + + * `'--stats'` + * `'--progress'` + * `'--one-file-system'` + * `'--compression lz4'` + * `'--checkpoint-interval 86400'` + +* Subkeys: + + * `option`: + + * Mandatory. The option to be used. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`borg_local__exclude_files__host_var` / `borg_local__exclude_files__group_var` + +* The list of files or directories which should be excluded from the backup. Excludes act as filters within the included paths. +* Type: List of dictionaries. +* Default: + + * `'/root/.cache'` + * `'*.svn*'` + * `'*.git*'` + * `'*.tmp'` + * `'*.temp'` + * `'*/cache/*'` + * `'*/log/*'` + +* Subkeys: + + * `file`: + + * Mandatory. The file or directory to be excluded. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. 
+ * Default: `'present'` + +`borg_local__icinga2_api_url` + +* The URL of the Icinga2 API (usually on the Icinga2 Master). This will be used to set a downtime for the corresponding backup service. +* Type: String. +* Default: `'https://{{ icinga2_agent__icinga2_master_host | d("") }}:{{ icinga2_agent__icinga2_master_port | d(5665) }}'` + +`borg_local__icinga2_api_user_login` + +* The Icinga2 API User to set the downtime for the corresponding backup service. +* Type: Dictionary. +* Default: unset + +`borg_local__icinga2_hostname` + +* The hostname of the Icinga2 host on which the downtime should be set. +* Type: String. +* Default: `'{{ ansible_facts["nodename"] }}'` + +`borg_local__include_files__host_var` / `borg_local__include_files__group_var` + +* The list of files or directories which should be included in the backup. Only the listed paths are backed up, everything else is implicitly excluded. +* Type: List of dictionaries. +* Default: + + * `'/etc'` + * `'/home'` + * `'/opt'` + * `'/root'` + * `'/var/spool/cron'` + +* Subkeys: + + * `file`: + + * Mandatory. The file or directory to be included. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`borg_local__on_calendar_daily_hour` + +* The hour of the daily backup. +* Type: Number. +* Default: `23` + +`borg_local__on_calendar_daily` + +* The time at which the daily backup will run. Once per day. +* Type: String. +* Default: `'*-*-* {{ borg_local__on_calendar_daily_hour }}:{{ 45 | random(seed=inventory_hostname) }}'` + +`borg_local__on_calendar_hourly` + +* The time at which the hourly backup will run. Once per hour. +* Type: String. +* Default: `'*-*-* *:{{ 59 | random(start=45, seed=inventory_hostname) }}'` + +`borg_local__retention_daily` + +* The number of daily backups to keep. +* Type: String. +* Default: `'14d'` + +`borg_local__retention_hourly` + +* The number of hourly backups to keep. +* Type: String.
+* Default: `'99H'` + +`borg_local__timer_daily_enabled` + +* Whether the daily backup should be enabled. +* Type: Bool. +* Default: `true` + +`borg_local__timer_hourly_enabled` + +* Whether the hourly backup should be enabled. +* Type: Bool. +* Default: `false` Example: ```yaml diff --git a/roles/borg_local/tasks/main.yml b/roles/borg_local/tasks/main.yml index 292056165..e817bd56a 100644 --- a/roles/borg_local/tasks/main.yml +++ b/roles/borg_local/tasks/main.yml @@ -35,6 +35,7 @@ - name: 'Deploy /etc/borg/borg-passphrase' ansible.builtin.template: + backup: true src: 'etc/borg/borg-passphrase.j2' dest: '/etc/borg/borg-passphrase' owner: 'root' @@ -43,6 +44,7 @@ - name: 'Deploy /etc/borg/borg.conf' ansible.builtin.template: + backup: true src: 'etc/borg/borg.conf.j2' dest: '/etc/borg/borg.conf' owner: 'root' @@ -51,6 +53,7 @@ - name: 'Deploy the borg backup wrapper script' ansible.builtin.template: + backup: true src: 'usr/local/bin/borg-backup.j2' dest: '/usr/local/bin/borg-backup' owner: 'root' diff --git a/roles/chrony/README.md b/roles/chrony/README.md index adc4e36c7..6a4f1c453 100644 --- a/roles/chrony/README.md +++ b/roles/chrony/README.md @@ -8,10 +8,15 @@ This role installs and configures [chrony](https://chrony.tuxfamily.org/), a NTP ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `chrony` | Installs and configures chrony | Restarts chronyd.service | -| `chrony:state` | Manages the state of the chrony service | - | +`chrony` + +* Installs and configures chrony. +* Triggers: chronyd.service restart. + +`chrony:state` + +* Manages the state of the chrony service. +* Triggers: none. ## Mandatory Role Variables @@ -21,14 +26,41 @@ This role does not have any mandatory variables. 
However, either `chrony__ntp_po ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `chrony__allow` | A list of subnets which are allowed to access the server as a NTP server. Setting this effectively turns this server into a NTP server. | `[]` | -| `chrony__bindaddress` | On which address chrony should listen. Can be used to restrict access to a certain address. | unset | -| `chrony__binddevice` | To which network interface chrony should bind. Can be used to restrict access to certain interfaces. Note that this does not work with enforcing SELinux. Try using `chrony__bindaddress`. | unset | -| `chrony__ntp_pools` | A list of NTP server pools. Same as `chrony__ntp_servers`, except that it is used to specify a pool of NTP servers rather than a single NTP server. | `[]` | -| `chrony__ntp_servers` | A list of NTP servers which should be used as a time source. The `ibust` option is always used, meaning chronyd will start with a burst of 4-8 requests in order to make the first update of the clock sooner. | `[]` | -| `chrony__service_enabled` | Enables or disables the chrony service, analogous to `systemctl enable/disable --now`. | `true` | +`chrony__allow` + +* A list of subnets which are allowed to access the server as a NTP server. Setting this effectively turns this server into a NTP server. +* Type: List. +* Default: `[]` + +`chrony__bindaddress` + +* On which address chrony should listen. Can be used to restrict access to a certain address. +* Type: String. +* Default: unset + +`chrony__binddevice` + +* To which network interface chrony should bind. Can be used to restrict access to certain interfaces. Note that this does not work with enforcing SELinux. Try using `chrony__bindaddress`. +* Type: String. +* Default: unset + +`chrony__ntp_pools` + +* A list of NTP server pools. Same as `chrony__ntp_servers`, except that it is used to specify a pool of NTP servers rather than a single NTP server. 
+* Type: List. +* Default: `[]` + +`chrony__ntp_servers` + +* A list of NTP servers which should be used as a time source. The `iburst` option is always used, meaning chronyd will start with a burst of 4-8 requests in order to make the first update of the clock sooner. +* Type: List. +* Default: `[]` + +`chrony__service_enabled` + +* Enables or disables the chrony service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` Example: ```yaml diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml index 6e2425cf5..efd185889 100644 --- a/roles/chrony/tasks/main.yml +++ b/roles/chrony/tasks/main.yml @@ -7,6 +7,7 @@ - name: 'Deploy /etc/chrony.conf' ansible.builtin.template: + backup: true src: 'etc/chrony.conf.j2' dest: '/etc/chrony.conf' mode: 0o644 diff --git a/roles/clamav/README.md b/roles/clamav/README.md index 64a60e2d4..2dfc2b732 100644 --- a/roles/clamav/README.md +++ b/roles/clamav/README.md @@ -23,40 +23,155 @@ If you use the [ClamAV Playbook](https://github.com/Linuxfabrik/lfops/blob/main/ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `clamav` | Installs and configures ClamAV | Restarts clamav-clamonacc.service | -| `clamav:state` | Manages the states of various ClamAV services and timers | - | -| `clamav:configure` | Manages the various ClamAV config files | Restarts clamav-clamonacc.service, clamd@scan.service | +`clamav` + +* Installs and configures ClamAV. +* Triggers: clamav-clamonacc.service restart. + +`clamav:state` + +* Manages the states of various ClamAV services and timers. +* Triggers: none. + +`clamav:configure` + +* Manages the various ClamAV config files. +* Triggers: clamav-clamonacc.service restart, clamd@scan.service restart.
## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `clamav__clamd_service_enabled` | Enables or disables the clamd background service, analogous to `systemctl enable/disable`. The clamd service is required for on-access scanning and full-scans. | `true` | -| `clamav__clamdscan_on_calendar` | When the full-scan should be run. Have a look at [systemd.time(7)](https://www.freedesktop.org/software/systemd/man/systemd.time.html) for the format. | `'*-*-* 21:{{ 59 | random(seed=inventory_hostname) }}'` | -| `clamav__clamdscan_paths` | Which paths should be scanned during the full-scan. | `'{{ clamav__scan_on_access_include_paths }}'` | -| `clamav__clamdscan_timer_enabled` | Enables or disables the clamdscan timer for the periodic full-scan, analogous to `systemctl enable/disable`. | `false` | -| `clamav__clamonacc_service_enabled` | Enables or disables the on-access scanning service, analogous to `systemctl enable/disable`. | `false` | -| `clamav__freshclam_private_mirror` | "This option allows you to easily point freshclam to private mirrors" (see `man freshclam.conf`). | `[]` | -| `clamav__freshclam_service_enabled` | Enables or disables the freshclam service, analogous to `systemctl enable/disable`. Freshclam is responsible for updating the official ClamAV signatures. | `true` | -| `clamav__mail_from` | Username with access to the mail server. Required to send mail notifications for found viruses. | `'{{ mailto_root__from }}'` | -| `clamav__mail_recipients` | List recipient addresses to which the mail notifications should be sent. | `'{{ mailto_root__to }}'` | -| `clamav__mail_subject_prefix` | This will set a prefix that will be showed in front of the hostname. Can be used to separate servers by environment or customer. 
| `''` | -| `clamav__scan_alert_broken_executables` | "With this option clamav will try to detect broken executables (both PE and ELF) and alert on them with the Broken.Executable heuristic signature." | `true` | -| `clamav__scan_detect_pua` | On-access & full-scans: "Detect Possibly Unwanted Applications." | `true` | -| `clamav__scan_max_directory_recursion` | "Maximum depth directories are scanned at." | `20` | -| `clamav__scan_max_file_size` | Full-scan: "Files larger than this limit won't be scanned." | `'450M'` | -| `clamav__scan_max_recursion` | Specifies how deeply nested archives should be scanned recursively. | `30` | -| `clamav__scan_max_scan_size` | "Sets the maximum amount of data to be scanned for each input file." | `'450M'` | -| `clamav__scan_on_access_exclude_paths` | On-access: "Set the exclude paths. All subdirectories are also excluded." | `[]` | -| `clamav__scan_on_access_include_paths` | On-access: "Set the include paths (all files inside them will be scanned)." | `[]` | -| `clamav__scan_on_access_max_file_size` | On-access: "Don't scan files larger than this." | `'500M'` | -| `clamav__scan_on_access_prevention` | On-access: Prevents access to the file if a virus is found. Note that this also blocks the full-scan from accessing the files. | `false` | -| `clamav__whitelist_files` | Whitelist specific files. Use `sigtool --md5 my-false-positive-file` to generate the entry. Have a look at the [official documentation](https://docs.clamav.net/manual/Signatures/AllowLists.html#file-allow-lists) for details. | `[]` | -| `clamav__whitelist_signatures` | Whitelist specific signatures. Note that it is possible that one needs to whitelist multiple signatures for the same finding, as it can come from different databases with different names. Have a look at the example below and the [official documentation](https://docs.clamav.net/manual/Signatures/AllowLists.html#signature-ignore-lists) for details. 
| `[]` | +`clamav__clamd_service_enabled` + +* Enables or disables the clamd background service, analogous to `systemctl enable/disable`. The clamd service is required for on-access scanning and full-scans. +* Type: Bool. +* Default: `true` + +`clamav__clamdscan_on_calendar` + +* When the full-scan should be run. Have a look at [systemd.time(7)](https://www.freedesktop.org/software/systemd/man/systemd.time.html) for the format. +* Type: String. +* Default: `'*-*-* 21:{{ 59 | random(seed=inventory_hostname) }}'` + +`clamav__clamdscan_paths` + +* Which paths should be scanned during the full-scan. +* Type: String. +* Default: `'{{ clamav__scan_on_access_include_paths }}'` + +`clamav__clamdscan_timer_enabled` + +* Enables or disables the clamdscan timer for the periodic full-scan, analogous to `systemctl enable/disable`. +* Type: Bool. +* Default: `false` + +`clamav__clamonacc_service_enabled` + +* Enables or disables the on-access scanning service, analogous to `systemctl enable/disable`. +* Type: Bool. +* Default: `false` + +`clamav__freshclam_private_mirror` + +* "This option allows you to easily point freshclam to private mirrors" (see `man freshclam.conf`). +* Type: List. +* Default: `[]` + +`clamav__freshclam_service_enabled` + +* Enables or disables the freshclam service, analogous to `systemctl enable/disable`. Freshclam is responsible for updating the official ClamAV signatures. +* Type: Bool. +* Default: `true` + +`clamav__mail_from` + +* Username with access to the mail server. Required to send mail notifications for found viruses. +* Type: String. +* Default: `'{{ mailto_root__from }}'` + +`clamav__mail_recipients` + +* List recipient addresses to which the mail notifications should be sent. +* Type: String. +* Default: `'{{ mailto_root__to }}'` + +`clamav__mail_subject_prefix` + +* This will set a prefix that will be shown in front of the hostname. Can be used to separate servers by environment or customer. +* Type: String.
+* Default: `''` + +`clamav__scan_alert_broken_executables` + +* "With this option clamav will try to detect broken executables (both PE and ELF) and alert on them with the Broken.Executable heuristic signature." +* Type: Bool. +* Default: `true` + +`clamav__scan_detect_pua` + +* On-access & full-scans: "Detect Possibly Unwanted Applications." +* Type: Bool. +* Default: `true` + +`clamav__scan_max_directory_recursion` + +* "Maximum depth directories are scanned at." +* Type: Number. +* Default: `20` + +`clamav__scan_max_file_size` + +* Full-scan: "Files larger than this limit won't be scanned." +* Type: String. +* Default: `'450M'` + +`clamav__scan_max_recursion` + +* Specifies how deeply nested archives should be scanned recursively. +* Type: Number. +* Default: `30` + +`clamav__scan_max_scan_size` + +* "Sets the maximum amount of data to be scanned for each input file." +* Type: String. +* Default: `'450M'` + +`clamav__scan_on_access_exclude_paths` + +* On-access: "Set the exclude paths. All subdirectories are also excluded." +* Type: List. +* Default: `[]` + +`clamav__scan_on_access_include_paths` + +* On-access: "Set the include paths (all files inside them will be scanned)." +* Type: List. +* Default: `[]` + +`clamav__scan_on_access_max_file_size` + +* On-access: "Don't scan files larger than this." +* Type: String. +* Default: `'500M'` + +`clamav__scan_on_access_prevention` + +* On-access: Prevents access to the file if a virus is found. Note that this also blocks the full-scan from accessing the files. +* Type: Bool. +* Default: `false` + +`clamav__whitelist_files` + +* Whitelist specific files. Use `sigtool --md5 my-false-positive-file` to generate the entry. Have a look at the [official documentation](https://docs.clamav.net/manual/Signatures/AllowLists.html#file-allow-lists) for details. +* Type: List. +* Default: `[]` + +`clamav__whitelist_signatures` +* Whitelist specific signatures. 
Note that it is possible that one needs to whitelist multiple signatures for the same finding, as it can come from different databases with different names. Have a look at the example below and the [official documentation](https://docs.clamav.net/manual/Signatures/AllowLists.html#signature-ignore-lists) for details. +* Type: List. +* Default: `[]` Example: ```yaml diff --git a/roles/clamav/tasks/main.yml b/roles/clamav/tasks/main.yml index 6fa4dd87d..3dd5e910c 100644 --- a/roles/clamav/tasks/main.yml +++ b/roles/clamav/tasks/main.yml @@ -15,6 +15,7 @@ - name: 'Deploy /etc/systemd/system/clamav-clamonacc.service.d/fdpass.conf' ansible.builtin.template: + backup: true src: 'etc/systemd/system/clamav-clamonacc.service.d/fdpass.conf.j2' dest: '/etc/systemd/system/clamav-clamonacc.service.d/fdpass.conf' owner: 'root' @@ -25,6 +26,7 @@ - name: 'Deploy /etc/systemd/system/clamav-clamdscan.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/clamav-clamdscan.service.j2' dest: '/etc/systemd/system/clamav-clamdscan.service' owner: 'root' @@ -33,6 +35,7 @@ - name: 'Deploy /etc/systemd/system/clamav-clamdscan.timer' ansible.builtin.template: + backup: true src: 'etc/systemd/system/clamav-clamdscan.timer.j2' dest: '/etc/systemd/system/clamav-clamdscan.timer' owner: 'root' @@ -42,6 +45,7 @@ - name: 'Deploy /usr/local/bin/clamav-virusevent.sh' ansible.builtin.template: + backup: true src: 'usr/local/bin/clamav-virusevent.sh.j2' dest: '/usr/local/bin/clamav-virusevent.sh' owner: 'root' @@ -50,6 +54,7 @@ - name: 'Deploy /etc/logrotate.d/clamav' ansible.builtin.template: + backup: true src: 'etc/logrotate.d/clamav.j2' dest: '/etc/logrotate.d/clamav' owner: 'root' @@ -64,6 +69,7 @@ - name: 'Deploy /etc/clamd.d/scan.conf' ansible.builtin.template: + backup: true src: 'etc/clamd.d/scan.conf.j2' dest: '/etc/clamd.d/scan.conf' owner: 'root' @@ -82,6 +88,7 @@ - name: 'Deploy /etc/freshclam.conf' ansible.builtin.template: + backup: true src: 'etc/freshclam.conf.j2' 
dest: '/etc/freshclam.conf' owner: 'root' @@ -98,6 +105,7 @@ - name: 'Deploy /var/lib/clamav/lfops_whitelist.ign2' ansible.builtin.template: + backup: true src: 'var/lib/clamav/lfops_whitelist.ign2.j2' dest: '/var/lib/clamav/lfops_whitelist.ign2' owner: 'root' @@ -107,6 +115,7 @@ - name: 'Deploy /var/lib/clamav/lfops_whitelist.fp' ansible.builtin.template: + backup: true src: 'var/lib/clamav/lfops_whitelist.fp.j2' dest: '/var/lib/clamav/lfops_whitelist.fp' owner: 'root' @@ -116,6 +125,7 @@ - name: 'Deploy /etc/clamd.d/clamdscan.filelist' ansible.builtin.template: + backup: true src: 'etc/clamd.d/clamdscan.filelist.j2' dest: '/etc/clamd.d/clamdscan.filelist' owner: 'root' diff --git a/roles/cloud_init/README.md b/roles/cloud_init/README.md index 12371386e..dbd45f10d 100644 --- a/roles/cloud_init/README.md +++ b/roles/cloud_init/README.md @@ -8,9 +8,10 @@ Note that removing `cloud-init` could break some functions of the cloud provider ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `cloud_init` | Removes the cloud-init package | - | +`cloud_init` + +* Removes the cloud-init package. +* Triggers: none. ## License diff --git a/roles/cockpit/README.md b/roles/cockpit/README.md index e1fcd7125..1331b738c 100644 --- a/roles/cockpit/README.md +++ b/roles/cockpit/README.md @@ -5,19 +5,36 @@ This role can either install or remove all cockpit packages from the system (for ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `cockpit` | Installs or removes cockpit packages | - | -| `cockpit:state` | Manages the state of the systemd socket | - | +`cockpit` + +* Installs or removes cockpit packages. +* Triggers: none. + +`cockpit:state` + +* Manages the state of the systemd socket. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `cockpit__additional_packages` | List of strings. 
Additional cockpit packages to install, to extend the functionality of the web console | unset | -| `cockpit__socket_enabled` | Bool. Enables or disables the `cockpit.socket`, analogous to `systemctl enable/disable --now`. | `true` | -| `cockpit__state` | String. State of the cockpit packages. Possible Options: `'absent'`, `'present'`. | `'absent'` | +`cockpit__additional_packages` + +* Additional cockpit packages to install, to extend the functionality of the web console. +* Type: List of strings. +* Default: unset + +`cockpit__socket_enabled` + +* Enables or disables the `cockpit.socket`, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`cockpit__state` + +* State of the cockpit packages. Possible Options: `'absent'`, `'present'`. +* Type: String. +* Default: `'absent'` Example: ```yaml diff --git a/roles/collabora/README.md b/roles/collabora/README.md index 60a173739..2b98a2f63 100644 --- a/roles/collabora/README.md +++ b/roles/collabora/README.md @@ -12,39 +12,204 @@ If you use the ["Collabora" Playbook](https://github.com/Linuxfabrik/lfops/blob/ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `collabora` | Installs and configures either Collabora CODE or Collabora Enterprise | Restarts coolwsd.service | -| `collabora:spell_check` | Installs spell checking tools | - | -| `collabora:state` | Manages the state of the coolwsd systemd service | - | +`collabora` + +* Installs and configures either Collabora CODE or Collabora Enterprise. +* Triggers: coolwsd.service restart. + +`collabora:spell_check` + +* Installs spell checking tools. +* Triggers: none. + +`collabora:state` + +* Manages the state of the coolwsd systemd service. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `collabora__coolwsd_allowed_languages__host_var` /
`collabora__coolwsd_allowed_languages__group_var` | List of dictionaries containing the supported languages of Writing Aids (spell checker, grammar checker, thesaurus, hyphenation) on this instance. Allowing too many has negative effect on startup performance. Subkeys:
  • `name`: Mandatory, string. Name of the language.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
|
  • `'de_AT'`
  • `'de_CH'`
  • `'de_DE'`
  • `'en_AU'`
  • `'en_CA'`
  • `'en_GB'`
  • `'en_US'`
  • `'fr'`
  • `'it_IT'`
| -| `collabora__coolwsd_content_security_policy` | List of CSP policy-directives. See https://w3.org/TR/CSP2. | `[]` | -| `collabora__coolwsd_deepl_api_url` | URL for the deepl API. Only works if `collabora__coolwsd_deepl_enabled` is true. | `'https://api-free.deepl.com/v2/translate'` | -| `collabora__coolwsd_deepl_auth_key` | Auth Key generated by your deepl account. Only works if `collabora__coolwsd_deepl_enabled` is true. | `''` | -| `collabora__coolwsd_deepl_enabled` | If true, shows translate option as a menu entry in the compact view and as an icon in the tabbed view. | `false` | -| `collabora__coolwsd_experimental_features` | If experimental features should be enabled or not. | `false` | -| `collabora__coolwsd_logging_file_enable` | If coolwsd should write to a logfile or not. | `true` | -| `collabora__coolwsd_lok_allow` | Allowed hosts as an external data source inside edited files. All allowed post_allow.host and storage.wopi entries are also considered to be allowed as a data source. Used for example in: PostMessage Action_InsertGraphics, =WEBSERVICE() function, external reference in the cell. | `[]` | -| `collabora__coolwsd_out_of_focus_timeout_secs` | The maximum number of seconds before dimming and stopping updates when the browser tab is no longer in focus. | `120` | -| `collabora__coolwsd_post_allow` | List of client IP addresses to allow for POST(REST). | `[]` | -| `collabora__coolwsd_ssl_enable` | Controls whether SSL encryption between coolwsd and the network is enabled (do not disable for production deployment). | `false` | -| `collabora__coolwsd_ssl_settings_ca_file_path` | Path to the ca file. Set this when coolwsd is SSL-terminating. | `'/etc/coolwsd/ca-chain.cert.pem'` | -| `collabora__coolwsd_ssl_settings_cert_file_path` | Path to the cert file. Set this when coolwsd is SSL-terminating. | `'/etc/coolwsd/cert.pem'` | -| `collabora__coolwsd_ssl_settings_key_file_path` | Path to the key file. Set this when coolwsd is SSL-terminating. 
| `'/etc/coolwsd/key.pem'` | -| `collabora__coolwsd_ssl_settings_ssl_verification` | Enable or disable SSL verification of hosts remote to coolwsd. If true SSL verification will be strict, otherwise certs of hosts will not be verified. | `false` | -| `collabora__coolwsd_ssl_termination` | Enable if coolwsd is behind a SSL-terminating proxy and therefore should act as if its using https but actually receives http. | `true` | -| `collabora__coolwsd_storage_wopi__host_var` /
`collabora__coolwsd_storage_wopi__group_var` | List of dictionaries containing regex pattern of hostname to allow access to the backend storage. Ususally the hostname application that uses Collabora CODE, for example Nextcloud. Subkeys:
  • `name`: Mandatory, string. Regex pattern.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
| `[]` | -| `collabora__coolwsd_welcome_enable` | Controls whether the welcome screen should be shown to the users on new install and updates. | `false` | -| `collabora__language_packages__host_var` /
`collabora__language_packages__group_var` | A list of dictionaries containing additional packages to be installed for language support (spell checking, thesaurus, etc). Subkeys:
  • `name`: Mandatory, string. Name of the package
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
| `dict`, `mythes` and `hunspell` for de, en, fr, it | -| `collabora__logrotate` | Number. Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). | `{{ logrotate__rotate \| d(14) }}` | -| `collabora__service_enabled` | Enables or disables the coolwsd service, analogous to `systemctl enable/disable --now`. | `true` | -| `collabora__use_code` | Use Collabora CODE Edition instead of Collabora Enterprise Edition. | `true` | +`collabora__coolwsd_allowed_languages__host_var` / `collabora__coolwsd_allowed_languages__group_var` + +* List of dictionaries containing the supported languages of Writing Aids (spell checker, grammar checker, thesaurus, hyphenation) on this instance. Allowing too many has negative effect on startup performance. +* Type: List of dictionaries. +* Default: + + * `'de_AT'` + * `'de_CH'` + * `'de_DE'` + * `'en_AU'` + * `'en_CA'` + * `'en_GB'` + * `'en_US'` + * `'fr'` + * `'it_IT'` + +* Subkeys: + + * `name`: + + * Mandatory. Name of the language. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`collabora__coolwsd_content_security_policy` + +* List of CSP policy-directives. See https://w3.org/TR/CSP2. +* Type: List of strings. +* Default: `[]` + +`collabora__coolwsd_deepl_api_url` + +* URL for the deepl API. Only works if `collabora__coolwsd_deepl_enabled` is true. +* Type: String. +* Default: `'https://api-free.deepl.com/v2/translate'` + +`collabora__coolwsd_deepl_auth_key` + +* Auth Key generated by your deepl account. Only works if `collabora__coolwsd_deepl_enabled` is true. +* Type: String. 
+* Default: `''` + +`collabora__coolwsd_deepl_enabled` + +* If true, shows translate option as a menu entry in the compact view and as an icon in the tabbed view. +* Type: Bool. +* Default: `false` + +`collabora__coolwsd_experimental_features` + +* If experimental features should be enabled or not. +* Type: Bool. +* Default: `false` + +`collabora__coolwsd_logging_file_enable` + +* If coolwsd should write to a logfile or not. +* Type: Bool. +* Default: `true` + +`collabora__coolwsd_lok_allow` + +* Allowed hosts as an external data source inside edited files. All allowed post_allow.host and storage.wopi entries are also considered to be allowed as a data source. Used for example in: PostMessage Action_InsertGraphics, =WEBSERVICE() function, external reference in the cell. +* Type: List of strings. +* Default: `[]` + +`collabora__coolwsd_out_of_focus_timeout_secs` + +* The maximum number of seconds before dimming and stopping updates when the browser tab is no longer in focus. +* Type: Number. +* Default: `120` + +`collabora__coolwsd_post_allow` + +* List of client IP addresses to allow for POST(REST). +* Type: List of strings. +* Default: `[]` + +`collabora__coolwsd_ssl_enable` + +* Controls whether SSL encryption between coolwsd and the network is enabled (do not disable for production deployment). +* Type: Bool. +* Default: `false` + +`collabora__coolwsd_ssl_settings_ca_file_path` + +* Path to the ca file. Set this when coolwsd is SSL-terminating. +* Type: String. +* Default: `'/etc/coolwsd/ca-chain.cert.pem'` + +`collabora__coolwsd_ssl_settings_cert_file_path` + +* Path to the cert file. Set this when coolwsd is SSL-terminating. +* Type: String. +* Default: `'/etc/coolwsd/cert.pem'` + +`collabora__coolwsd_ssl_settings_key_file_path` + +* Path to the key file. Set this when coolwsd is SSL-terminating. +* Type: String. 
+* Default: `'/etc/coolwsd/key.pem'` + +`collabora__coolwsd_ssl_settings_ssl_verification` + +* Enable or disable SSL verification of hosts remote to coolwsd. If true SSL verification will be strict, otherwise certs of hosts will not be verified. +* Type: Bool. +* Default: `false` + +`collabora__coolwsd_ssl_termination` + +* Enable if coolwsd is behind a SSL-terminating proxy and therefore should act as if it's using https but actually receives http. +* Type: Bool. +* Default: `true` + +`collabora__coolwsd_storage_wopi__host_var` / `collabora__coolwsd_storage_wopi__group_var` + +* List of dictionaries containing regex pattern of hostname to allow access to the backend storage. Usually the hostname of the application that uses Collabora CODE, for example Nextcloud. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `name`: + + * Mandatory. Regex pattern. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`collabora__coolwsd_welcome_enable` + +* Controls whether the welcome screen should be shown to the users on new install and updates. +* Type: Bool. +* Default: `false` + +`collabora__language_packages__host_var` / `collabora__language_packages__group_var` + +* A list of dictionaries containing additional packages to be installed for language support (spell checking, thesaurus, etc). +* Type: List of dictionaries. +* Default: `dict`, `mythes` and `hunspell` for de, en, fr, it + +* Subkeys: + + * `name`: + + * Mandatory. Name of the package. + * Type: String. + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`collabora__logrotate` + +* Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space).
+* Type: Number. +* Default: `{{ logrotate__rotate | d(14) }}` + +`collabora__service_enabled` + +* Enables or disables the coolwsd service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`collabora__use_code` + +* Use Collabora CODE Edition instead of Collabora Enterprise Edition. +* Type: Bool. +* Default: `true` Example: ```yaml diff --git a/roles/collabora/tasks/main.yml b/roles/collabora/tasks/main.yml index cff5c0aad..b6d04004a 100644 --- a/roles/collabora/tasks/main.yml +++ b/roles/collabora/tasks/main.yml @@ -38,6 +38,7 @@ - name: 'deploy /etc/coolwsd/coolwsd.xml (v{{ collabora__installed_version }}{{ collabora__use_code | bool | ternary("-code", "") }})' ansible.builtin.template: + backup: true src: 'etc/coolwsd/{{ collabora__installed_version }}-coolwsd{{ collabora__use_code | bool | ternary("-code", "") }}.xml.j2' dest: '/etc/coolwsd/coolwsd.xml' owner: 'cool' @@ -54,6 +55,7 @@ - name: 'Deploy /etc/logrotate.d/coolwsd' ansible.builtin.template: + backup: true src: 'etc/logrotate.d/coolwsd.j2' dest: '/etc/logrotate.d/coolwsd' owner: 'root' diff --git a/roles/collect_rpmnew_rpmsave/README.md b/roles/collect_rpmnew_rpmsave/README.md index bbe80ff6f..0ff513001 100644 --- a/roles/collect_rpmnew_rpmsave/README.md +++ b/roles/collect_rpmnew_rpmsave/README.md @@ -51,9 +51,10 @@ The resulting set of files can then be further analyzed, for example by hand or ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `collect_rpmnew_rpmsave` | Collect all rpmnew/rpmsave files | - | +`collect_rpmnew_rpmsave` + +* Collect all rpmnew/rpmsave files. +* Triggers: none. 
## License diff --git a/roles/collect_rpmnew_rpmsave/tasks/main.yml b/roles/collect_rpmnew_rpmsave/tasks/main.yml index 9eceb6bac..0bcdd61a1 100644 --- a/roles/collect_rpmnew_rpmsave/tasks/main.yml +++ b/roles/collect_rpmnew_rpmsave/tasks/main.yml @@ -62,7 +62,7 @@ label: '{{ item.key }}' when: - 'found_files.files' - - 'package_file_mappings is defined' + - 'package_file_mappings is defined and package_file_mappings | length > 0' - name: 'Download original files to control node' ansible.builtin.fetch: @@ -74,7 +74,7 @@ label: '{{ item.key }}' when: - 'found_files.files' - - 'package_file_mappings is defined' + - 'package_file_mappings is defined and package_file_mappings | length > 0' tags: - 'collect_rpmnew_rpmsave' diff --git a/roles/coturn/README.md b/roles/coturn/README.md index 1c01a46f5..f09ea6242 100644 --- a/roles/coturn/README.md +++ b/roles/coturn/README.md @@ -10,20 +10,33 @@ This role installs and configures [coturn](https://github.com/coturn/coturn). ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `coturn` | Installs and configures coturn | Restarts coturn.service | -| `coturn:sate` | Manages the state of the coturn systemd service | - | +`coturn` + +* Installs and configures coturn. +* Triggers: coturn.service restart. + +`coturn:sate` + +* Manages the state of the coturn systemd service. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `coturn__denied_peer_ip` | List of IP address ranges which never be used as peer IPs. This should be used to prevent the coturn server from accessing private IPs. Given the turn server is likely behind your firewall, remember to include any privileged public IPs too. | -| `coturn__realm` | The default realm to be used for the users. Hint: Should be the domain of the coturn server for the usage with Nextcloud. | -| `coturn__static_auth_secret` | Static authentication secret value (a string) for TURN REST API only. 
| +`coturn__denied_peer_ip` + +* List of IP address ranges which should never be used as peer IPs. This should be used to prevent the coturn server from accessing private IPs. Given the turn server is likely behind your firewall, remember to include any privileged public IPs too. +* Type: List of strings. + +`coturn__realm` + +* The default realm to be used for the users. Hint: Should be the domain of the coturn server for the usage with Nextcloud. +* Type: String. + +`coturn__static_auth_secret` + +* Static authentication secret value (a string) for TURN REST API only. +* Type: String. Example: ```yaml @@ -37,14 +50,41 @@ coturn__static_auth_secret: 'egi7eesa9eik4kae9ov9quohpheequ9XighaivobuThoo7ooKuo ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `coturn__allowed_peer_ip` | List of IP address ranges which are excepted from `coturn__denied_peer_ip`. | `['{{ ansible_facts["default_ipv4"]["address"] }}']` | -| `coturn__listening_port` | TURN listener port for UDP and TCP listeners | `3478` | -| `coturn__max_port` | Upper bound of the UDP port range for relay endpoints allocation. | `65535` | -| `coturn__min_port` | Lower bound of the UDP port range for relay endpoints allocation. | `49152` | -| `coturn__service_enabled` | Enables or disables the coturn service, analogous to `systemctl enable/disable --now`. Possible options: | `true` | -| `coturn__state_nonce` | Use extra security with nonce value having limited lifetime, in seconds. Set it to 0 for unlimited nonce lifetime. | `0` | +`coturn__allowed_peer_ip` + +* List of IP address ranges which are excepted from `coturn__denied_peer_ip`. +* Type: List of strings. +* Default: `['{{ ansible_facts["default_ipv4"]["address"] }}']` + +`coturn__listening_port` + +* TURN listener port for UDP and TCP listeners. +* Type: Number. +* Default: `3478` + +`coturn__max_port` + +* Upper bound of the UDP port range for relay endpoints allocation. +* Type: Number. 
+* Default: `65535` + +`coturn__min_port` + +* Lower bound of the UDP port range for relay endpoints allocation. +* Type: Number. +* Default: `49152` + +`coturn__service_enabled` + +* Enables or disables the coturn service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`coturn__state_nonce` + +* Use extra security with nonce value having limited lifetime, in seconds. Set it to 0 for unlimited nonce lifetime. +* Type: Number. +* Default: `0` Example: ```yaml diff --git a/roles/coturn/tasks/main.yml b/roles/coturn/tasks/main.yml index e52568109..ecc8c506d 100644 --- a/roles/coturn/tasks/main.yml +++ b/roles/coturn/tasks/main.yml @@ -7,6 +7,7 @@ - name: 'Deploy /etc/coturn/turnserver.conf' ansible.builtin.template: + backup: true src: 'etc/coturn/turnserver.conf.j2' dest: '/etc/coturn/turnserver.conf' owner: 'root' diff --git a/roles/crypto_policy/README.md b/roles/crypto_policy/README.md index a5da246d7..09e30bcb7 100644 --- a/roles/crypto_policy/README.md +++ b/roles/crypto_policy/README.md @@ -5,16 +5,22 @@ This role sets the crypto policy for the system. In addition, it implements and ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `crypto_policy` | Sets the system crypto policy | - | +`crypto_policy` + +* Sets the system crypto policy. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `crypto_policy__policy` | String. The crypto policy to activate. See `roles/crypto_policy/templates/etc/crypto-policies/policies/modules/` for a list of available crypto policies. Example: `DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-SSH-NO-CBC` |
  • RedHat8:
    `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20'`
  • RedHat9:
    `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20:LINUXFABRIK-SSH-NO-ETM'`
  • RedHat10:
    `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20:LINUXFABRIK-SSH-NO-ETM'`
| +`crypto_policy__policy` + +* The crypto policy to activate. See `roles/crypto_policy/templates/etc/crypto-policies/policies/modules/` for a list of available crypto policies. Example: `DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-SSH-NO-CBC` +* Type: String. +* Default: + * RedHat8: `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20'` + * RedHat9: `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20:LINUXFABRIK-SSH-NO-ETM'` + * RedHat10: `'DEFAULT:LINUXFABRIK-NO-SHA1:LINUXFABRIK-NO-WEAKMAC:LINUXFABRIK-SSH-NO-CBC:LINUXFABRIK-SSH-NO-CHACHA20:LINUXFABRIK-SSH-NO-ETM'` Example: ```yaml diff --git a/roles/crypto_policy/tasks/main.yml b/roles/crypto_policy/tasks/main.yml index 20d7ca031..57af2aa1c 100644 --- a/roles/crypto_policy/tasks/main.yml +++ b/roles/crypto_policy/tasks/main.yml @@ -2,6 +2,7 @@ - name: 'Deploy /etc/crypto-policies/policies/modules/*.pmod' ansible.builtin.template: + backup: true src: '{{ item }}' dest: '/etc/crypto-policies/policies/modules/{{ item | basename | split(".") | first }}.pmod' owner: 'root' diff --git a/roles/dnf_makecache/README.md b/roles/dnf_makecache/README.md index c1658d8ae..4edb645e2 100644 --- a/roles/dnf_makecache/README.md +++ b/roles/dnf_makecache/README.md @@ -5,17 +5,25 @@ This role ensures that the DNF-makecache Systemd service and timer are disabled. ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `dnf_makecache` | Manages the dnf-makecache service and timer | - | +`dnf_makecache` + +* Manages the dnf-makecache service and timer. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `dnf_makecache__service_enabled` | Enables or disables the DNF-makecache service, analogous to `systemctl enable/disable --now`. 
| `false` | -| `dnf_makecache__timer_enabled` | Enables or disables the DNF-makecache timer, analogous to `systemctl enable/disable --now`. | `false` | +`dnf_makecache__service_enabled` + +* Enables or disables the DNF-makecache service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `false` + +`dnf_makecache__timer_enabled` + +* Enables or disables the DNF-makecache timer, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `false` Example: ```yaml diff --git a/roles/dnf_versionlock/README.md b/roles/dnf_versionlock/README.md index 256c19330..4ad4fccd3 100644 --- a/roles/dnf_versionlock/README.md +++ b/roles/dnf_versionlock/README.md @@ -9,16 +9,18 @@ Hints: ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `dnf_versionlock` | Installs and configures dnf versionlock | - | +`dnf_versionlock` + +* Installs and configures dnf versionlock. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `dnf_versionlock__versionlocks` | List of versionlock () entries. Have a look at `man yum-versionlock` for RHEL7 or [dnf versionlock](https://dnf-plugins-core.readthedocs.io/en/latest/versionlock.html) for RHEL8. | +`dnf_versionlock__versionlocks` + +* List of versionlock () entries. Have a look at `man yum-versionlock` for RHEL7 or [dnf versionlock](https://dnf-plugins-core.readthedocs.io/en/latest/versionlock.html) for RHEL8. +* Type: List of strings. 
Example: ```yaml diff --git a/roles/dnf_versionlock/tasks/main.yml b/roles/dnf_versionlock/tasks/main.yml index e25a9906d..a58049401 100644 --- a/roles/dnf_versionlock/tasks/main.yml +++ b/roles/dnf_versionlock/tasks/main.yml @@ -12,6 +12,7 @@ - name: 'deploy /etc/dnf/plugins/versionlock.list' ansible.builtin.template: + backup: true src: 'etc/dnf/plugins/versionlock.list.j2' dest: '{{ dnf_versionlock__list_path }}' owner: 'root' diff --git a/roles/docker/README.md b/roles/docker/README.md index 2f4afc6fa..88819c33d 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -12,22 +12,54 @@ If you use the ["docker" Playbook](https://github.com/Linuxfabrik/lfops/blob/mai ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `docker` | Installs and configures docker | Restarts docker.service | -| `docker:state` | Manages the state of the docker service | - | +`docker` + +* Installs and configures docker. +* Triggers: docker.service restart. + +`docker:state` + +* Manages the state of the docker service. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `docker__daemon_json_dns`| A list of DNS server for all Docker containers. | The server's nameserver | -| `docker__daemon_json_insecure_registries`| A list of insecure registries (without TLS) which should be accepted by the docker daemon. | unset | -| `docker__daemon_json_log_driver`| The default logging driver for all containers. Possible options: . | `'syslog'` | -| `docker__daemon_json_log_opts`| A dictionary of logging options. Possible options: . | unset | -| `docker__service_enabled`| Enables or disables the docker service, analogous to `systemctl enable/disable`. | `true` | -| `docker__service_state`| Changes the state of the docker service, analogous to `systemctl start/stop/restart/reload`. Possible options:
* `started`
* `stopped`
* `restarted`
* `reloaded` | `'started'` | +`docker__daemon_json_dns` + +* A list of DNS server for all Docker containers. +* Type: List. +* Default: the server's nameserver (`['{{ ansible_facts["dns"]["nameservers"][0] }}']`) + +`docker__daemon_json_insecure_registries` + +* A list of insecure registries (without TLS) which should be accepted by the docker daemon. +* Type: List. +* Default: unset + +`docker__daemon_json_log_driver` + +* The default logging driver for all containers. Possible options: . +* Type: String. +* Default: `'syslog'` + +`docker__daemon_json_log_opts` + +* A dictionary of logging options. Possible options: . +* Type: Dictionary. +* Default: unset + +`docker__service_enabled` + +* Enables or disables the docker service, analogous to `systemctl enable/disable`. +* Type: Bool. +* Default: `true` + +`docker__service_state` + +* Changes the state of the docker service, analogous to `systemctl start/stop/restart/reload`. Possible options: `started`, `stopped`, `restarted`, `reloaded`. +* Type: String. 
+* Default: `'started'` Example: ```yaml diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 98715e16e..93e15860c 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -17,6 +17,7 @@ - name: 'Deploy /etc/docker/daemon.json' ansible.builtin.template: + backup: true src: 'etc/docker/daemon.json.j2' dest: '/etc/docker/daemon.json' mode: 0o640 diff --git a/roles/duplicity/README.md b/roles/duplicity/README.md index 5d0cba959..a490ff6dc 100644 --- a/roles/duplicity/README.md +++ b/roles/duplicity/README.md @@ -30,21 +30,55 @@ To start a backup, simply call `duba` (or `duba --config=/etc/duba/duba.json --c ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `duplicity` | Installs and configures duplicity | - | -| `duplicity:configure` | Deploys the configuration for duplicity | - | -| `duplicity:script` | Just deploys the `duba` script | - | -| `duplicity:state` | Manages the state of the daily systemd timer | - | +`duplicity` + +* Installs and configures duplicity. +* Triggers: none. + +`duplicity:configure` + +* Deploys the configuration for duplicity. +* Triggers: none. + +`duplicity:script` + +* Just deploys the `duba` script. +* Triggers: none. + +`duplicity:state` + +* Manages the state of the daily systemd timer. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `duplicity__gpg_encrypt_master_key_block` | The ASCII-armored **public** master GPG key. Obtain it using `gpg --armor --export $GPG_KEY`. This key is imported on the server and is used in addition to the server's own local GPG key to encrypt the backups. This means that the backups can be restored using either the master or the server's local private key (which is pretty cool in case of a desaster recovery). Be aware of the empty line between `-----BEGIN PGP PUBLIC KEY BLOCK-----` and your public key block. 
| -| `duplicity__gpg_encrypt_master_key` | The long key ID of the master GPG key. Obtain it using `gpg --list-keys --keyid-format=long` (after importing the key) or `gpg /path/to/keyfile`. | -| `duplicity__swift_login` | The Swift username and password. Usually, this is given by the provider of the Swift Storage. Subkeys:
* `username`: Mandatory, string. The Swift username.
* `password`: Mandatory, string. The Swift password. | +`duplicity__gpg_encrypt_master_key_block` + +* The ASCII-armored **public** master GPG key. Obtain it using `gpg --armor --export $GPG_KEY`. This key is imported on the server and is used in addition to the server's own local GPG key to encrypt the backups. This means that the backups can be restored using either the master or the server's local private key (which is pretty cool in case of a disaster recovery). Be aware of the empty line between `-----BEGIN PGP PUBLIC KEY BLOCK-----` and your public key block. +* Type: String. + +`duplicity__gpg_encrypt_master_key` + +* The long key ID of the master GPG key. Obtain it using `gpg --list-keys --keyid-format=long` (after importing the key) or `gpg /path/to/keyfile`. +* Type: String. + +`duplicity__swift_login` + +* The Swift username and password. Usually, this is given by the provider of the Swift Storage. +* Type: Dictionary. + +* Subkeys: + + * `username`: + + * Mandatory. The Swift username. + * Type: String. + + * `password`: + + * Mandatory. The Swift password. + * Type: String. Example: ```yaml @@ -64,24 +98,133 @@ duplicity__swift_login: ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `duplicity__backup_backend` | The backup backend being used. Possible options:
  • `swift`
  • `sftp`
| `swift` | -| `duplicity__backup_dest_container` | The Swift container. This can be used to separate backups on the destination. By default, this will be used in `duplicity__backup_dest`. | `'{{ ansible_nodename }}'` | -| `duplicity__backup_dest` | The backup destination. This will be used in combination with the backup source path to create the target URL for `duplicity`. | `duplicity__backup_dest_container \| regex_replace("/$", "") }}'` | -| `duplicity__backup_full_if_older_than` | After how long a full backup instead of a incremental one should be done. Time Formats: `s`, `m`, `h`, `D`, `W`, `M`, or `Y`. | `'30D'` | -| `duplicity__backup_retention_time` | The retention time of the backups. Time Formats: `s`, `m`, `h`, `D`, `W`, `M`, or `Y`. | `'30D'` | -| `duplicity__backup_sources__host_var` /
`duplicity__backup_sources__group_var` | List of dictionaries with directories to backup. Subkeys:
  • `path`: Mandatory, string. Path to the folder to be backed up.
  • `divide`: Optional, boolean. Defaults to `false`. Whether to split a large directory at its first level to perform parallel backups. Imagine a computer with 4 processor cores and the folder `data` containing 100 files and folders. If `divide` is set to `true`, `duba` will start and control 5 duplicate processes at once to speed up the backup process by almost a factor of 5.
  • `excludes`: Optional, list. Defaults to `[]`. List of patterns that should not be included in the backup for this `path`.
  • `state`: Optional, string. Either `present` or `absent`. Defaults to `present`.
|
  • `/backup`
  • `/etc`
  • `/home`
  • `/opt`
  • `/root`
  • `/var/spool/cron`.
| -| `duplicity__excludes` | List of *global* exclude shell patterns for `duplicity`. Have a look at `man duplicity` for details. | `['**/*.git*', '**/*.svn*', '**/*.temp', '**/*.tmp', '**/.cache', '**/cache', '**/log']` | -| `duplicity__loglevel` | Set the loglevel. Possible options:
  • `error`
  • `warning`
  • `notice`
  • `info`
  • `debug`
| `'notice'` | -| `duplicity__logrotate` | Number. Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). | `'{{ logrotate__rotate \| d(14) }}'` | -| `duplicity__on_calendar_hour` | A shorthand to set the hour of `duplicity__on_calendar`. | `'23'` | -| `duplicity__on_calendar` | The `OnCalendar` definition for the daily systemd timer. Have a look at `man systemd.time(7)` for the format. | `'*-*-* {{ duplicity__on_calendar_hour }}:{{ 59 \| random(seed=inventory_hostname) }}'` | -| `duplicity__sftp_password` | Password for SSH User that is used by SFTP connection. | -| `duplicity__swift_authurl` | The Authentication URL for Swift. Usually, this is given by the provider of the Swift Storage. | `'swiss-backup02.infomaniak.com/identity/v3'` | -| `duplicity__swift_authversion` | The Authentication Version for Swift. Usually, this is given by the provider of the Swift Storage. | `'3'` | -| `duplicity__swift_tenantname` | The Swift Tenantname. Usually, this is given by the provider of the Swift Storage. | `'sb_project_{{ duplicity__swift_login["username"] }}'` | -| `duplicity__timer_enabled` | The state of the daily systemd timer. | `true` | +`duplicity__backup_backend` + +* The backup backend being used. Possible options: `swift`, `sftp`. +* Type: String. +* Default: `'swift'` + +`duplicity__backup_dest_container` + +* The Swift container. This can be used to separate backups on the destination. By default, this will be used in `duplicity__backup_dest`. +* Type: String. +* Default: `'{{ ansible_nodename }}'` + +`duplicity__backup_dest` + +* The backup destination. This will be used in combination with the backup source path to create the target URL for `duplicity`. +* Type: String. 
+* Default: `'swift://{{ duplicity__backup_dest_container | regex_replace("/$", "") }}'` + +`duplicity__backup_full_if_older_than` + +* After how long a full backup instead of an incremental one should be done. Time Formats: `s`, `m`, `h`, `D`, `W`, `M`, or `Y`. +* Type: String. +* Default: `'30D'` + +`duplicity__backup_retention_time` + +* The retention time of the backups. Time Formats: `s`, `m`, `h`, `D`, `W`, `M`, or `Y`. +* Type: String. +* Default: `'30D'` + +`duplicity__backup_sources__host_var` / `duplicity__backup_sources__group_var` + +* List of dictionaries with directories to backup. +* Type: List of dictionaries. +* Default: + + * `/backup` + * `/etc` + * `/home` + * `/opt` + * `/root` + * `/var/spool/cron` + +* Subkeys: + + * `path`: + + * Mandatory. Path to the folder to be backed up. + * Type: String. + + * `divide`: + + * Optional. Whether to split a large directory at its first level to perform parallel backups. Imagine a computer with 4 processor cores and the folder `data` containing 100 files and folders. If `divide` is set to `true`, `duba` will start and control 5 duplicate processes at once to speed up the backup process by almost a factor of 5. + * Type: Bool. + * Default: `false` + + * `excludes`: + + * Optional. List of patterns that should not be included in the backup for this `path`. + * Type: List of strings. + * Default: `[]` + + * `state`: + + * Optional. Either `present` or `absent`. + * Type: String. + * Default: `'present'` + +`duplicity__excludes` + +* List of *global* exclude shell patterns for `duplicity`. Have a look at `man duplicity` for details. +* Type: List of strings. +* Default: `['**/*.git*', '**/*.svn*', '**/*.temp', '**/*.tmp', '**/.cache', '**/cache', '**/log']` + +`duplicity__loglevel` + +* Set the loglevel. Possible options: `error`, `warning`, `notice`, `info`, `debug`. +* Type: String. 
+* Default: `'notice'` + +`duplicity__logrotate` + +* Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). +* Type: Number. +* Default: `{{ logrotate__rotate | d(14) }}` + +`duplicity__on_calendar_hour` + +* A shorthand to set the hour of `duplicity__on_calendar`. +* Type: String. +* Default: `'23'` + +`duplicity__on_calendar` + +* The `OnCalendar` definition for the daily systemd timer. Have a look at `man systemd.time(7)` for the format. +* Type: String. +* Default: `'*-*-* {{ duplicity__on_calendar_hour }}:{{ 59 | random(seed=inventory_hostname) }}'` + +`duplicity__sftp_password` + +* Password for SSH User that is used by SFTP connection. +* Type: String. +* Default: unset + +`duplicity__swift_authurl` + +* The Authentication URL for Swift. Usually, this is given by the provider of the Swift Storage. +* Type: String. +* Default: `'https://swiss-backup02.infomaniak.com/identity/v3'` + +`duplicity__swift_authversion` + +* The Authentication Version for Swift. Usually, this is given by the provider of the Swift Storage. +* Type: String. +* Default: `'3'` + +`duplicity__swift_tenantname` + +* The Swift Tenantname. Usually, this is given by the provider of the Swift Storage. +* Type: String. +* Default: `'sb_project_{{ duplicity__swift_login["username"] }}'` + +`duplicity__timer_enabled` + +* The state of the daily systemd timer. +* Type: Bool. 
+* Default: `true` Example: ```yaml diff --git a/roles/duplicity/tasks/main.yml b/roles/duplicity/tasks/main.yml index d8626a3d4..31dd7282e 100644 --- a/roles/duplicity/tasks/main.yml +++ b/roles/duplicity/tasks/main.yml @@ -61,12 +61,14 @@ - name: 'Deploy /etc/duba/duba.json' ansible.builtin.template: + backup: true src: 'etc/duba/duba.json.j2' dest: '/etc/duba/duba.json' mode: 0o600 # file contains secrets - name: 'Deploy /etc/systemd/system/duba.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/duba.service.j2' dest: '/etc/systemd/system/duba.service' owner: 'root' @@ -75,6 +77,7 @@ - name: 'Deploy /etc/systemd/system/duba.timer' ansible.builtin.template: + backup: true src: 'etc/systemd/system/duba.timer.j2' dest: '/etc/systemd/system/duba.timer' owner: 'root' @@ -84,6 +87,7 @@ - name: 'Deploy /etc/logrotate.d/duplicity' ansible.builtin.template: + backup: true src: 'etc/logrotate.d/duplicity.j2' dest: '/etc/logrotate.d/duplicity' owner: 'root' @@ -98,6 +102,7 @@ - name: 'Deploy /usr/local/bin/duba' ansible.builtin.template: + backup: true src: 'usr/local/bin/duba.j2' dest: '/usr/local/bin/duba' owner: 'root' diff --git a/roles/elastic_agent/README.md b/roles/elastic_agent/README.md index de759281d..3bad8c20b 100644 --- a/roles/elastic_agent/README.md +++ b/roles/elastic_agent/README.md @@ -17,12 +17,25 @@ This role installs and configures [Elastic Agent](https://www.elastic.co/elastic ## Tags -| Tag | Description | -| --- | ----------- | -| `elastic_agent` | Installs and configures elastic-agent | -| `elastic_agent:certs` | Deploys CA certificate | -| `elastic_agent:enroll` | Enrolls the agent to Fleet Server | -| `elastic_agent:state` | Manages the state of the elastic-agent service | +`elastic_agent` + +* Installs and configures elastic-agent. +* Triggers: none. + +`elastic_agent:certs` + +* Deploys CA certificate. +* Triggers: none. + +`elastic_agent:enroll` + +* Enrolls the agent to Fleet Server. +* Triggers: none. 
+ +`elastic_agent:state` + +* Manages the state of the elastic-agent service. +* Triggers: none. ## Pre-Installation Steps @@ -39,10 +52,17 @@ Get an enrollment token from Kibana: ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `elastic_agent__enrollment_token` | The enrollment token for registering the agent with Fleet Server. Obtain from Kibana Fleet UI or API. | -| `elastic_agent__fleet_url` | URL of the Fleet Server. Will only be used for the initial connection, afterwards the fleet server defined in the policy will be used. | +`elastic_agent__enrollment_token` + +* The enrollment token for registering the agent with Fleet Server. Obtain from Kibana Fleet UI or API. +* Type: String. +* Default: none + +`elastic_agent__fleet_url` + +* URL of the Fleet Server. Will only be used for the initial connection, afterwards the fleet server defined in the policy will be used. +* Type: String. +* Default: none Example: ```yaml @@ -54,13 +74,35 @@ elastic_agent__fleet_url: 'https://fleet1.example.com:8220' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `elastic_agent__fleet_ca` | ASCII-armored PEM CA certificate for verifying the Fleet Server TLS certificate. | unset | -| `elastic_agent__insecure` | Skip TLS verification. Only use for testing with self-signed certificates. | `false` | -| `elastic_agent__service_enabled` | Enables or disables the elastic-agent service, analogous to `systemctl enable/disable`. | `true` | -| `elastic_agent__service_state` | The state of the elastic-agent service. Possible options: `started`, `stopped`, `restarted`. | `'started'` | -| `elastic_agent__tags` | List of tags to apply to the agent during enrollment. Useful for identifying agents in Fleet. | `[]` | +`elastic_agent__fleet_ca` + +* ASCII-armored PEM CA certificate for verifying the Fleet Server TLS certificate. +* Type: String. 
+* Default: unset + +`elastic_agent__insecure` + +* Skip TLS verification. Only use for testing with self-signed certificates. +* Type: Bool. +* Default: `false` + +`elastic_agent__service_enabled` + +* Enables or disables the elastic-agent service, analogous to `systemctl enable/disable`. +* Type: Bool. +* Default: `true` + +`elastic_agent__service_state` + +* The state of the elastic-agent service. Possible options: `started`, `stopped`, `restarted`. +* Type: String. +* Default: `'started'` + +`elastic_agent__tags` + +* List of tags to apply to the agent during enrollment. Useful for identifying agents in Fleet. +* Type: List. +* Default: `[]` Example: ```yaml diff --git a/roles/elastic_agent_fleet_server/README.md b/roles/elastic_agent_fleet_server/README.md index 5ad292567..e6e3eb6ca 100644 --- a/roles/elastic_agent_fleet_server/README.md +++ b/roles/elastic_agent_fleet_server/README.md @@ -17,12 +17,25 @@ This role installs and configures [Elastic Agent](https://www.elastic.co/elastic ## Tags -| Tag | Description | -| --- | ----------- | -| `elastic_agent_fleet_server` | Installs and configures elastic-agent as Fleet Server | -| `elastic_agent_fleet_server:certs` | Deploys TLS certificates | -| `elastic_agent_fleet_server:enroll` | Enrolls the agent as Fleet Server | -| `elastic_agent_fleet_server:state` | Manages the state of the elastic-agent service | +`elastic_agent_fleet_server` + +* Installs and configures elastic-agent as Fleet Server. +* Triggers: none. + +`elastic_agent_fleet_server:certs` + +* Deploys TLS certificates. +* Triggers: none. + +`elastic_agent_fleet_server:enroll` + +* Enrolls the agent as Fleet Server. +* Triggers: none. + +`elastic_agent_fleet_server:state` + +* Manages the state of the elastic-agent service. +* Triggers: none. ## Pre-Installation Steps @@ -73,6 +86,7 @@ EOF ``` Copy the generated certificates to the Ansible inventory. 
The certificates are used for: + * `elastic_agent_fleet_server__elasticsearch_ca` - The CA certificate (same as Elasticsearch CA) * `elastic_agent_fleet_server__ssl_cert` - The Fleet Server certificate * `elastic_agent_fleet_server__ssl_key` - The Fleet Server private key @@ -80,10 +94,17 @@ Copy the generated certificates to the Ansible inventory. The certificates are u ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `elastic_agent_fleet_server__elasticsearch_host` | Elasticsearch URL. Will only be used for the initial connection, so the node's role is irrelevant. Afterwards, the output defined in the policy will be used. | -| `elastic_agent_fleet_server__service_token` | The service token for authenticating the Fleet Server to Elasticsearch. Generate using the Elasticsearch API. | +`elastic_agent_fleet_server__elasticsearch_host` + +* Elasticsearch URL. Will only be used for the initial connection, so the node's role is irrelevant. Afterwards, the output defined in the policy will be used. +* Type: String. +* Default: none + +`elastic_agent_fleet_server__service_token` + +* The service token for authenticating the Fleet Server to Elasticsearch. Generate using the Elasticsearch API. +* Type: String. +* Default: none Example: ```yaml @@ -95,16 +116,53 @@ elastic_agent_fleet_server__service_token: 'AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL3Rv ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `elastic_agent_fleet_server__elasticsearch_ca` | ASCII-armored PEM CA certificate for verifying Elasticsearch TLS (Fleet Server -> Elasticsearch). | unset | -| `elastic_agent_fleet_server__insecure` | Skip TLS verification. Only use for testing with self-signed certificates. | `false` | -| `elastic_agent_fleet_server__policy_id` | The Fleet Server policy ID. Must exist in Kibana Fleet. 
| `'fleet-server-policy'` | -| `elastic_agent_fleet_server__service_enabled` | Enables or disables the elastic-agent service, analogous to `systemctl enable/disable`. | `true` | -| `elastic_agent_fleet_server__service_state` | The state of the elastic-agent service. Possible options: `started`, `stopped`, `restarted`. | `'started'` | -| `elastic_agent_fleet_server__ssl_cert` | ASCII-armored PEM TLS certificate for the Fleet Server (Fleet Agent -> Fleet Server). | unset | -| `elastic_agent_fleet_server__ssl_key` | ASCII-armored PEM TLS private key for the Fleet Server (Fleet Agent -> Fleet Server). | unset | -| `elastic_agent_fleet_server__url` | The URL of the Fleet Server. Used by agents to connect. | `'https://{{ ansible_facts["nodename"] }}:8220'` | +`elastic_agent_fleet_server__elasticsearch_ca` + +* ASCII-armored PEM CA certificate for verifying Elasticsearch TLS (Fleet Server -> Elasticsearch). +* Type: String. +* Default: unset + +`elastic_agent_fleet_server__insecure` + +* Skip TLS verification. Only use for testing with self-signed certificates. +* Type: Bool. +* Default: `false` + +`elastic_agent_fleet_server__policy_id` + +* The Fleet Server policy ID. Must exist in Kibana Fleet. +* Type: String. +* Default: `'fleet-server-policy'` + +`elastic_agent_fleet_server__service_enabled` + +* Enables or disables the elastic-agent service, analogous to `systemctl enable/disable`. +* Type: Bool. +* Default: `true` + +`elastic_agent_fleet_server__service_state` + +* The state of the elastic-agent service. Possible options: `started`, `stopped`, `restarted`. +* Type: String. +* Default: `'started'` + +`elastic_agent_fleet_server__ssl_cert` + +* ASCII-armored PEM TLS certificate for the Fleet Server (Fleet Agent -> Fleet Server). +* Type: String. +* Default: unset + +`elastic_agent_fleet_server__ssl_key` + +* ASCII-armored PEM TLS private key for the Fleet Server (Fleet Agent -> Fleet Server). +* Type: String. 
+* Default: unset + +`elastic_agent_fleet_server__url` + +* The URL of the Fleet Server. Used by agents to connect. +* Type: String. +* Default: `'https://{{ ansible_facts["nodename"] }}:8220'` Example: ```yaml diff --git a/roles/elasticsearch/README.md b/roles/elasticsearch/README.md index 9c1c2ea2b..089e12525 100644 --- a/roles/elasticsearch/README.md +++ b/roles/elasticsearch/README.md @@ -1,13 +1,13 @@ # Ansible Role linuxfabrik.lfops.elasticsearch -This role installs and configures an Elasticsearch server. +This role installs and configures Elasticsearch, either as a single-node instance or as a multi-node cluster. Note that this role does NOT let you specify a particular Elasticsearch server version. It simply installs the latest available Elasticsearch server version from the repos configured in the system. If you want or need to install a specific version, have a look at the [linuxfabrik.lfops.repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch) role. ## Mandatory Requirements -* Enable the official elasticsearch repository. This can be done using the [linuxfabrik.lfops.repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch) role. +* Enable the official Elasticsearch repository. This can be done using the [linuxfabrik.lfops.repo_elasticsearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_elasticsearch) role. If you use the [elasticsearch playbook](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/elasticsearch.yml), this is automatically done for you. @@ -19,14 +19,39 @@ If you use the [elasticsearch playbook](https://github.com/Linuxfabrik/lfops/blo If you use the [elasticsearch playbook](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/elasticsearch.yml), this is automatically done for you. +## Single-Node Setup + +For a single-node setup, no special configuration is needed beyond the mandatory requirements. 
When `elasticsearch__discovery_seed_hosts` is not set, the role automatically configures `discovery.type: single-node`. After installation, generate the initial password for the `elastic` user (see Post-Installation Steps below). + + ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `elasticsearch` | Installs and configures Elasticsearch | Restarts elasticsearch.service | -| `elasticsearch:certs` | Deploys TLS certificates | Restarts elasticsearch.service | -| `elasticsearch:configure` | Deploys configuration files | Restarts elasticsearch.service | -| `elasticsearch:state` | Manages the state of the Elasticsearch service | - | +`elasticsearch` + +* Installs Elasticsearch and unzip. +* Creates the data directory and tmp directory. +* Deploys all configuration files. +* Deploys TLS certificates (if `elasticsearch__ca_cert` is set). +* Manages the state of the Elasticsearch service. +* Triggers: elasticsearch.service restart. + +`elasticsearch:certs` + +* Deploys TLS certificates (CA, HTTP, transport). +* Triggers: elasticsearch.service restart. + +`elasticsearch:configure` + +* Deploys `/etc/elasticsearch/elasticsearch.yml`. +* Deploys `/etc/elasticsearch/log4j2.properties`. +* Deploys the sysconfig file. +* Deploys `/tmp/certutil.yml` (if `elasticsearch__discovery_seed_hosts` is set). +* Triggers: elasticsearch.service restart. + +`elasticsearch:state` + +* Manages the state of the Elasticsearch service (`systemctl enable/disable`, `start/stop/restart`). +* Triggers: none. ## Post-Installation Steps @@ -40,22 +65,27 @@ After setting up a single node or cluster, generate the initial password for the ## Setting Up an Elasticsearch Cluster -This role supports creating a multi-node Elasticsearch cluster using manual certificate distribution. This approach provides full automation and avoids the limitations of Elasticsearch's enrollment token system (which requires interactive commands that cannot be automated in Ansible). 
+This role supports creating a multi-node Elasticsearch cluster using manual certificate distribution. Elasticsearch 8.x ships with an enrollment token mechanism for adding nodes to a cluster. However, enrollment tokens expire after 30 minutes, and the process requires interactive commands on the command line. This makes it unsuitable for automation with Ansible. Instead, this role generates TLS certificates manually using `elasticsearch-certutil` and distributes them to all nodes via Ansible. + +Note that a fully unattended cluster setup is not possible with Elasticsearch. The initial certificate generation and password creation require manual steps on the command line (see below). Once certificates exist, subsequent node deployments are fully automated. If you need a search engine with better automation support, consider [OpenSearch](https://github.com/Linuxfabrik/lfops/tree/main/roles/opensearch), where certificate generation and admin password setup are fully integrated into the Ansible role. + +The following steps are marked as **manual** or **automated** accordingly. 
All cluster nodes must: + * Have the same `elasticsearch__cluster_name__*_var` configured * Be able to communicate with each other (configure `elasticsearch__network_host` to be accessible from other nodes, e.g., `0.0.0.0` or a specific IP) * Have `elasticsearch__discovery_seed_hosts` set to the list of all cluster nodes from the start -### Deploy First Node and Generate Certificates +### Deploy First Node and Generate Certificates (manual) -Deploy Elasticsearch on the first node (stopped state) to use the certutil tool: +Deploy Elasticsearch on the first node (stopped state) to use the certutil tool (**automated**): ```bash ansible-playbook --inventory inventory linuxfabrik.lfops.elasticsearch --limit node1.example.com --extra-vars='{"elasticsearch__service_state": "stopped"}' ``` -Connect to the first node and generate certificates: +Connect to the first node and generate certificates (**manual** - Elasticsearch does not provide a non-interactive way to do this): ```bash # generate CA (use empty password for automation) with 10 years validity # IMPORTANT: Back up this CA - it's needed for adding nodes later @@ -75,12 +105,12 @@ unzip ca.zip --out /tmp/certs.zip ``` -Copy the generated certificates to your Ansible inventory (have a look at the Optional Variables below for the paths). +Copy the generated certificates to your Ansible inventory (**manual**). Have a look at the Optional Variables below for the paths. The certificates are used for `elasticsearch__{http,transport}_{cert,key}`. It is possible to either use different certificates for http and transport or use the same for both. -### Bootstrap the First Node(s) +### Bootstrap the First Node(s) (automated) You can bootstrap one or more nodes - both options work. Starting with a single node makes troubleshooting easier. However, bear in mind that both the `master` and `data` roles are required for bootstrapping. Either the first node must have these roles, or multiple nodes must be bootstrapped. 
@@ -91,9 +121,9 @@ ansible-playbook --inventory inventory linuxfabrik.lfops.elasticsearch --limit n Attention: Only include the first node in `elasticsearch__cluster_initial_master_nodes`. If you include nodes that do not exist yet, the first node will wait indefinitely for them and the cluster will not form. -### Verify Cluster State +### Verify Cluster State (manual) -On the first node, generate the initial password and verify the cluster state: +On the first node, generate the initial password (**manual** - Elasticsearch requires this interactive command) and verify the cluster state: ```bash /usr/share/elasticsearch/bin/elasticsearch-reset-password --username elastic @@ -114,9 +144,9 @@ curl --cacert "$elastic_cacert" \ ``` -### Deploy Additional Nodes +### Deploy Additional Nodes (automated) -Note: Deploy remaining nodes without `elasticsearch__cluster_initial_master_nodes`. +Deploy remaining nodes without `elasticsearch__cluster_initial_master_nodes`. ```bash ansible-playbook --inventory inventory linuxfabrik.lfops.elasticsearch --limit node2.example.com,node3.example.com @@ -124,7 +154,7 @@ ansible-playbook --inventory inventory linuxfabrik.lfops.elasticsearch --limit n The nodes will automatically join the existing cluster using `elasticsearch__discovery_seed_hosts`. -### Clear Initial Master Nodes Configuration +### Clear Initial Master Nodes Configuration (automated) After at least one additional master node has joined, remove the `cluster.initial_master_nodes` setting from the first node: @@ -134,7 +164,7 @@ ansible-playbook --inventory inventory linuxfabrik.lfops.elasticsearch --limit n This prevents issues when the first node is restarted. -### Verify Complete Cluster +### Verify Complete Cluster (manual) Verify all nodes have joined the cluster: ```bash @@ -151,7 +181,7 @@ The status should be `green` with all nodes listed. ## Adding a New Node to an Existing Cluster -1. Generate certificates for the new node using the existing CA. 
On the node where the CA is stored: +1. Generate certificates for the new node using the existing CA (**manual**). On the node where the CA is stored: ```bash cat > /tmp/new-node-cert.yml < `elasticsearch__action_auto_create_index__group_var` | Automatic index creation allows any index to be created automatically.
For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). | `true` | -| `elasticsearch__ca_cert` | ASCII-armored PEM CA certificate for TLS. When set, enables manual certificate management mode and disables auto-enrollment. All cluster nodes should use the same CA certificate. | unset | -| `elasticsearch__cluster_initial_master_nodes` | A list of initial master-eligible nodes. The entries have to match the `elasticsearch__node_name`. **IMPORTANT:** Only use this during initial cluster bootstrap via `--extra-vars`. Never set this permanently in inventory. After the first successful cluster start, all subsequent runs should omit this variable. | unset | -| `elasticsearch__cluster_name__host_var` /
`elasticsearch__cluster_name__group_var` | A descriptive name for your cluster.
For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). | `'my-application'` | -| `elasticsearch__cluster_routing_allocation_awareness_attributes` | List of awareness attribute names to enable [shard allocation awareness](https://www.elastic.co/docs/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness). Distributes replicas across different attribute values to minimize risk of data loss during failures. Configure the same attributes on all master-eligible nodes | `[]` | -| `elasticsearch__cluster_routing_allocation_awareness_force` | Dictionary for forced awareness to prevent replica overloading when a location fails. Key is the attribute name, value is list of expected attribute values. Elasticsearch will leave replicas unassigned rather than concentrating them in remaining locations. | `{}` | -| `elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage` | Float `0 <= n <= 1`. flood stage percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch enforces a read-only index block on every index that has one or more shards allocated on nodes having at least one disk exceeding the flood stage | `0.95` | -| `elasticsearch__cluster_routing_allocation_disk_watermark_high` | Float `0 <= n <= 1`. Low watermark percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch will not allocate shards to nodes whose disk usage exceeds this percentage | `0.9` | -| `elasticsearch__cluster_routing_allocation_disk_watermark_low` | Float `0 <= n <= 1`. 
Low watermark percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch will not allocate shards to nodes whose disk usage exceeds this percentage | `0.85` | -| `elasticsearch__discovery_seed_hosts` | A list of IPs or hostnames that point to all master-eligible nodes of the cluster. The port defaults to 9300 but can be overwritten using `:9301`, for example. | unset | -| `elasticsearch__http_cert` | ASCII-armored PEM HTTP certificate. | unset | -| `elasticsearch__http_key` | ASCII-armored PEM HTTP private key. | unset | -| `elasticsearch__log4j2_retention_days` | Number of days to retain rotated Elasticsearch log files (server, deprecation, slowlog, audit). All log appenders rotate daily and delete files older than this value. | `3` | -| `elasticsearch__network_host` | Sets the address for both HTTP and transport traffic. Accepts an IP address, a hostname, or a [special value](https://www.elastic.co/guide/en/elasticsearch/reference/8.19/modules-network.html#network-interface-values). | `'_local_'` | -| `elasticsearch__node_attributes` | Dictionary of custom node attributes. Can be used for shard allocation awareness. Each attribute identifies a node's physical location or characteristic. | `{}` | -| `elasticsearch__node_name` | A descriptive name for the node | `'{{ ansible_facts["nodename"] }}'` | -| `elasticsearch__node_roles` | List of roles for this node. Available roles: `master`, `data`, `data_content`, `data_hot`, `data_warm`, `data_cold`, `data_frozen`, `ingest`, `ml`, `remote_cluster_client`, `transform`, `voting_only`. See [Elasticsearch node roles documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles). | unset | -| `elasticsearch__path_data` | Path to the directory where Elasticsearch stores its data. 
| `/var/lib/elasticsearch` | -| `elasticsearch__path_repos` | Paths pointing to [Shared file system repositories](https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository) used for snapshots (backups). | `[]` | -| `elasticsearch__raw` | Multiline string. Raw content which will be appended to the `elasticsearch.yml` config file. | unset | -| `elasticsearch__service_enabled` | Enables or disables the elasticsearch service, analogous to `systemctl enable/disable --now`. | `true` | -| `elasticsearch__service_state` | Controls the state of the elasticsearch service, analogous to `systemctl start/stop/restart/reload`. Possible options:
* `started`
* `stopped`
* `restarted`
* `reloaded` | `'started'` | -| `elasticsearch__transport_cert` | ASCII-armored PEM transport certificate. | unset | -| `elasticsearch__transport_key` | ASCII-armored PEM transport private key. | unset | +`elasticsearch__action_auto_create_index__host_var` / `elasticsearch__action_auto_create_index__group_var` + +* Automatic index creation allows any index to be created automatically. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: Bool. +* Default: `true` + +`elasticsearch__ca_cert` + +* ASCII-armored PEM CA certificate for TLS. When set, enables manual certificate management mode and disables auto-enrollment. All cluster nodes should use the same CA certificate. +* Type: String. +* Default: unset + +`elasticsearch__cluster_initial_master_nodes` + +* A list of initial master-eligible nodes. The entries have to match the `elasticsearch__node_name`. Only use this during initial cluster bootstrap via `--extra-vars`. Never set this permanently in inventory. After the first successful cluster start, all subsequent runs should omit this variable. +* Type: List of strings. +* Default: unset + +`elasticsearch__cluster_name__host_var` / `elasticsearch__cluster_name__group_var` + +* A descriptive name for your cluster. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: String. +* Default: `'my-application'` + +`elasticsearch__cluster_routing_allocation_awareness_attributes` + +* List of awareness attribute names to enable [shard allocation awareness](https://www.elastic.co/docs/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness). Distributes replicas across different attribute values to minimize risk of data loss during failures. Configure the same attributes on all master-eligible nodes. +* Type: List of strings. 
+* Default: `[]` + +`elasticsearch__cluster_routing_allocation_awareness_force` + +* Dictionary for forced awareness to prevent replica overloading when a location fails. Key is the attribute name, value is list of expected attribute values. Elasticsearch will leave replicas unassigned rather than concentrating them in remaining locations. +* Type: Dictionary. +* Default: `{}` + +`elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage` + +* Flood stage percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch enforces a read-only index block on every index that has one or more shards allocated on nodes having at least one disk exceeding the flood stage. +* Type: Float (`0 <= n <= 1`). +* Default: `0.95` + +`elasticsearch__cluster_routing_allocation_disk_watermark_high` + +* High watermark percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch attempts to relocate shards away from nodes whose disk usage exceeds this percentage. +* Type: Float (`0 <= n <= 1`). +* Default: `0.9` + +`elasticsearch__cluster_routing_allocation_disk_watermark_low` + +* Low watermark percentage for [disk-based shard allocation](https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings#disk-based-shard-allocation). Elasticsearch will not allocate shards to nodes whose disk usage exceeds this percentage. +* Type: Float (`0 <= n <= 1`). +* Default: `0.85` + +`elasticsearch__discovery_seed_hosts` + +* A list of IPs or hostnames that point to all master-eligible nodes of the cluster. The port defaults to 9300 but can be overwritten using `:9301`, for example. +* Type: List of strings.
+* Default: unset + +`elasticsearch__http_cert` + +* ASCII-armored PEM HTTP certificate. +* Type: String. +* Default: unset + +`elasticsearch__http_key` + +* ASCII-armored PEM HTTP private key. +* Type: String. +* Default: unset + +`elasticsearch__log4j2_retention_days` + +* Number of days to retain rotated Elasticsearch log files (server, deprecation, slowlog, audit). All log appenders rotate daily and delete files older than this value. +* Type: Number. +* Default: `3` + +`elasticsearch__network_host` + +* Sets the address for both HTTP and transport traffic. Accepts an IP address, a hostname, or a [special value](https://www.elastic.co/guide/en/elasticsearch/reference/8.19/modules-network.html#network-interface-values). +* Type: String. +* Default: `'_local_'` + +`elasticsearch__node_attributes` + +* Dictionary of custom node attributes. Can be used for shard allocation awareness. Each attribute identifies a node's physical location or characteristic. +* Type: Dictionary. +* Default: `{}` + +`elasticsearch__node_name` + +* A descriptive name for the node. +* Type: String. +* Default: `'{{ ansible_facts["nodename"] }}'` + +`elasticsearch__node_roles` + +* List of roles for this node. Available roles: `master`, `data`, `data_content`, `data_hot`, `data_warm`, `data_cold`, `data_frozen`, `ingest`, `ml`, `remote_cluster_client`, `transform`, `voting_only`. See [Elasticsearch node roles documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles). +* Type: List of strings. +* Default: unset + +`elasticsearch__path_data` + +* Path to the directory where Elasticsearch stores its data. +* Type: String. +* Default: `'/var/lib/elasticsearch'` + +`elasticsearch__path_repos` + +* Paths pointing to [Shared file system repositories](https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository) used for snapshots (backups). +* Type: List of strings. 
+* Default: `[]` + +`elasticsearch__raw` + +* Raw content which will be appended to the `elasticsearch.yml` config file. +* Type: Multiline string. +* Default: unset + +`elasticsearch__service_enabled` + +* Enables or disables the Elasticsearch service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`elasticsearch__service_state` + +* Controls the state of the Elasticsearch service, analogous to `systemctl start/stop/restart/reload`. Possible options: `started`, `stopped`, `restarted`, `reloaded`. +* Type: String. +* Default: `'started'` + +`elasticsearch__transport_cert` + +* ASCII-armored PEM transport certificate. +* Type: String. +* Default: unset + +`elasticsearch__transport_key` + +* ASCII-armored PEM transport private key. +* Type: String. +* Default: unset + Example: ```yaml @@ -236,7 +386,6 @@ elasticsearch__http_cert: '{{ lookup("ansible.builtin.file", "{{ inventory_dir } elasticsearch__http_key: '{{ lookup("ansible.builtin.file", "{{ inventory_dir }}/host_files/{{ inventory_hostname }}/etc/elasticsearch/certs/http.key") }}' elasticsearch__log4j2_retention_days: 7 elasticsearch__network_host: '0.0.0.0' -elasticsearch__network_host: '_local_' # or '127.0.0.1' for single node elasticsearch__node_attributes: datacenter: 'dc1' host: 'pod01' diff --git a/roles/elasticsearch/tasks/main.yml b/roles/elasticsearch/tasks/main.yml index 28d9635f9..32ebcbb35 100644 --- a/roles/elasticsearch/tasks/main.yml +++ b/roles/elasticsearch/tasks/main.yml @@ -125,15 +125,16 @@ elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage <= 1 fail_msg: 'elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage must be a number between 0 and 1' - - name: 'ensure elasticsearch__cluster_routing_allocation_disk_watermark_low < elasticsearch__cluster_routing_allocation_disk_watermark_high < elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage' + - name: 'ensure 
elasticsearch__cluster_routing_allocation_disk_watermark_low <= elasticsearch__cluster_routing_allocation_disk_watermark_high <= elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage' ansible.builtin.assert: that: > - elasticsearch__cluster_routing_allocation_disk_watermark_low < elasticsearch__cluster_routing_allocation_disk_watermark_high and - elasticsearch__cluster_routing_allocation_disk_watermark_high < elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage + elasticsearch__cluster_routing_allocation_disk_watermark_low <= elasticsearch__cluster_routing_allocation_disk_watermark_high and + elasticsearch__cluster_routing_allocation_disk_watermark_high <= elasticsearch__cluster_routing_allocation_disk_watermark_flood_stage fail_msg: 'elasticsearch__cluster_routing_allocation_disk_watermark_low must not exceed elasticsearch__cluster_routing_allocation_disk_watermark_high' - name: 'deploy /etc/elasticsearch/elasticsearch.yml' ansible.builtin.template: + backup: true src: 'etc/elasticsearch/elasticsearch.yml.j2' dest: '/etc/elasticsearch/elasticsearch.yml' owner: 'root' @@ -167,6 +168,7 @@ - name: 'deploy {{ __elasticsearch__sysconfig_file_path }}' ansible.builtin.template: + backup: true src: 'etc/sysconfig/elasticsearch.j2' dest: '{{ __elasticsearch__sysconfig_file_path }}' owner: 'root' @@ -176,6 +178,7 @@ - name: 'deploy /tmp/certutil.yml' ansible.builtin.template: + backup: true src: 'tmp/certutil.yml.j2' dest: '/tmp/certutil.yml' owner: 'root' diff --git a/roles/example/README.md b/roles/example/README.md index fac13eed2..6089b1b08 100644 --- a/roles/example/README.md +++ b/roles/example/README.md @@ -1,73 +1,232 @@ # Ansible Role linuxfabrik.lfops.example -This role configures something using [example](https://example.com/). Currently, this role Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. +This role installs and configures [Example](https://example.com/). Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +This role also serves as a reference for consistent role development across LFOps. All `ansible.builtin.*` modules used in `tasks/main.yml` are documented with their most common parameters. -## Mandatory Requirements +This role is compatible with the following example versions: -* Enable the [example repository](https://example.com/). This can be done using the [linuxfabrik.lfops.repo_example](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_example) role. -* Install EXAMPLE. This can be done using the [linuxfabrik.lfops.example](https://github.com/Linuxfabrik/lfops/tree/main/roles/example) role. -* On RHEL-compatible systems, enable the EPEL repository. This can be done using the [linuxfabrik.lfops.repo_epel](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_epel) role. +* 1.0.0 +* 2.0.0 -**Attention** +## Mandatory Requirements -> Make sure that this condition is met. +* Enable the example repository. This can be done using the [linuxfabrik.lfops.repo_example](https://github.com/Linuxfabrik/lfops/tree/main/roles/repo_example) role. If you use the [Example Playbook](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/example.yml), this is automatically done for you. ## Optional Requirements -* Step 1 -* Step 2 +* Install the optional dependency. This can be done using the [linuxfabrik.lfops.optional_dependency](https://github.com/Linuxfabrik/lfops/tree/main/roles/optional_dependency) role. 
## Tags -| Tag | What it does | -| --- | ------------ | -| `example` | * step 1
* step 2 | -| `example:configure` | * step 1
* step 2 | -| `example:script` | * step 1
* step 2 | -| `example:state` | * step 1
* step 2 | +`example` +* Installs required packages and plugins. +* Creates the example system user and group. +* Deploys the configuration files. +* Ensures the example service is in the desired state. +* Triggers: example.service restart. -## Post-Installation Steps +`example:configure` -After setting up a single node or cluster, generate the initial password for the `example` user: +* Deploys configuration files. +* Triggers: example.service restart. -```bash -/usr/share/elasticsearch/bin/elasticsearch-reset-password --username example -``` +`example:state` + +* Manages the service state (start, stop, enable, disable). +* Triggers: none. + +`example:plugin` + +* Manages optional example plugins (install/remove). +* Triggers: none. + +`example:user` + +* Manages application users via the REST API. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `example__var1` | List/Dict/String/Number/Bool. Description. | +`example__version` + +* The version of example to install. +* Type: String. Example: ```yaml # mandatory -example__var1: 'value' +example__version: '3.2.1' ``` ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `example__logrotate` | Number. Log files are rotated `count` days before being removed or mailed to the address specified in a `logrotate` mail directive. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). | `{{ logrotate__rotate \| d(14) }}` | -| `example__service_enabled` | Bool. Enables or disables the service, analogous to `systemctl enable/disable --now`. | `true` | -| `example__service_state`| String. Changes the state of the service, analogous to `systemctl start/stop/restart/reload`. Possible options:
  • `reloaded`
  • `restarted`
  • `started`
  • `stopped`
| `'started'` | -| `example__var2` | List/Dict/String/Number/Bool. Description. | `'default'` | -| `example__var3` | List of dictionaries. Description. Subkeys:
  • `name`: Mandatory, string. The package name.
  • `state`: Mandatory, string. State of the package, one of `present`, `absent`.
| `[]` | +`example__conf_log_level__host_var` / `example__conf_log_level__group_var` + +* The log level. +* Type: String. One of `debug`, `info`, `warn`, `error`. +* Default: `'info'` + +`example__conf_max_connections__host_var` / `example__conf_max_connections__group_var` + +* Maximum number of concurrent connections. Must be between 1 and 10000. +* Type: Number. +* Default: `100` + +`example__conf_worker_threads` + +* Number of worker threads for request processing. +* Type: Number. +* Default: 1.0.0: `4`, 2.0.0: `8` + +`example__lib_version` + +* The version of the [Linuxfabrik Python Libraries](https://github.com/Linuxfabrik/lib) to install. +* Type: String. + +`example__listeners__host_var` / `example__listeners__group_var` + +* Network listeners for the example service. Items are identified by a composite key of `name` + `port`, allowing the same name on different ports. +* Type: List of dictionaries. +* Default: + + ```yaml + - name: 'default' + port: 8080 + - name: 'default' + port: 8443 + ssl: true + ``` + +* Subkeys: + + * `name`: + + * Mandatory. Listener name. + * Type: String. + + * `port`: + + * Mandatory. Port number. + * Type: Number. + + * `ssl`: + + * Optional. Enable SSL for this listener. + * Type: Bool. + * Default: `false` + + * `state`: + + * Optional. `present` or `absent`. + * Type: String. + * Default: `'present'` + +`example__logrotate` + +* Log files are rotated `count` days before being removed. If count is `0`, old versions are removed rather than rotated. If count is `-1`, old logs are not removed at all (use with caution, may waste performance and disk space). +* Type: Number. +* Default: `{{ logrotate__rotate | d(14) }}` + +`example__maintenance_cron_minute` + +* Minute of the hour at which the maintenance cron job runs. +* Type: Number. +* Default: `{{ 59 | random(seed=inventory_hostname) }}` + +`example__plugins__host_var` / `example__plugins__group_var` + +* Optional plugins to install as OS packages. 
+* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Package name of the plugin. + * Type: String. + + * `state`: + + * Optional. `present` or `absent`. + * Type: String. + * Default: `'present'` + +`example__service_enabled` + +* Enables or disables the service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`example__service_state` + +* Changes the state of the service, analogous to `systemctl start/stop/restart/reload`. +* Type: String. One of `reloaded`, `restarted`, `started`, `stopped`. +* Default: `'started'` + +`example__users__host_var` / `example__users__group_var` + +* Application users to manage. +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Username. + * Type: String. + + * `password`: + + * Mandatory for `state: 'present'` users. Password. + * Type: String. + + * `comment`: + + * Optional. User description. + * Type: String. + + * `state`: + + * Optional. `present` or `absent`. + * Type: String. 
+ * Default: `'present'` Example: ```yaml # optional -example__var2: 'value' +example__conf_log_level__host_var: 'debug' +example__conf_max_connections__host_var: 200 +example__conf_worker_threads: 16 +example__lib_version: '2.4.0' +example__listeners__host_var: + - name: 'default' + port: 8443 + state: 'absent' + - name: 'api' + port: 9090 +example__logrotate: 7 +example__maintenance_cron_minute: 30 +example__plugins__host_var: + - name: 'example-plugin-auth-ldap' + - name: 'example-plugin-cache-redis' + - name: 'example-plugin-legacy-api' + state: 'absent' +example__service_enabled: true +example__service_state: 'started' +example__users__host_var: + - name: 'example-admin' + password: 'linuxfabrik' + comment: 'Admin Account' + - name: 'old-user' + state: 'absent' ``` diff --git a/roles/example/defaults/main.yml b/roles/example/defaults/main.yml new file mode 100644 index 000000000..d49936512 --- /dev/null +++ b/roles/example/defaults/main.yml @@ -0,0 +1,87 @@ +# --- scalar injection pattern --- +# priority: host > group > dependent > role +example__conf_log_level__dependent_var: '' +example__conf_log_level__group_var: '' +example__conf_log_level__host_var: '' +example__conf_log_level__role_var: 'info' +example__conf_log_level__combined_var: '{{ + example__conf_log_level__host_var if (example__conf_log_level__host_var | string | length) else + example__conf_log_level__group_var if (example__conf_log_level__group_var | string | length) else + example__conf_log_level__dependent_var if (example__conf_log_level__dependent_var | string | length) else + example__conf_log_level__role_var + }}' + +example__conf_max_connections__dependent_var: '' +example__conf_max_connections__group_var: '' +example__conf_max_connections__host_var: '' +example__conf_max_connections__role_var: 100 +example__conf_max_connections__combined_var: '{{ + example__conf_max_connections__host_var if (example__conf_max_connections__host_var | string | length) else + 
example__conf_max_connections__group_var if (example__conf_max_connections__group_var | string | length) else + example__conf_max_connections__dependent_var if (example__conf_max_connections__dependent_var | string | length) else + example__conf_max_connections__role_var + }}' + +# version-specific default from vars/<version>.yml, overridable by the user via inventory +example__conf_worker_threads: '{{ __example__conf_worker_threads }}' + +# referencing another role's variable keeps values consistent when the user sets it in +# their inventory. always provide a d() fallback, since role defaults are only loaded +# for roles that are part of the play. +example__logrotate: '{{ logrotate__rotate | d(14) }}' + +# use inventory_hostname as seed for random but idempotent values. +# this ensures each host gets a different minute, but the same minute on every run. +example__maintenance_cron_minute: '{{ 59 | random(seed=inventory_hostname) }}' + +# not all variables need the injection pattern. simple settings like service state +# are typically only set by the user, never injected by other roles. 
+example__service_enabled: true +example__service_state: 'started' + + +# --- list of dicts injection pattern --- +# combined using linuxfabrik.lfops.combine_lod +example__plugins__dependent_var: [] +example__plugins__group_var: [] +example__plugins__host_var: [] +example__plugins__role_var: [] +example__plugins__combined_var: '{{ ( + example__plugins__role_var + + example__plugins__dependent_var + + example__plugins__group_var + + example__plugins__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +example__users__dependent_var: [] +example__users__group_var: [] +example__users__host_var: [] +example__users__role_var: [] +example__users__combined_var: '{{ ( + example__users__role_var + + example__users__dependent_var + + example__users__group_var + + example__users__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +# --- list of dicts injection pattern with composite unique key --- +# when items are identified by a combination of keys (e.g. same name on different ports), +# pass a list of key names to unique_key. +example__listeners__dependent_var: [] +example__listeners__group_var: [] +example__listeners__host_var: [] +example__listeners__role_var: + - name: 'default' + port: 8080 + - name: 'default' + port: 8443 + ssl: true +example__listeners__combined_var: '{{ ( + example__listeners__role_var + + example__listeners__dependent_var + + example__listeners__group_var + + example__listeners__host_var + ) | linuxfabrik.lfops.combine_lod(unique_key=["name", "port"]) + }}' diff --git a/roles/example/handlers/main.yml b/roles/example/handlers/main.yml new file mode 100644 index 000000000..a7cba4401 --- /dev/null +++ b/roles/example/handlers/main.yml @@ -0,0 +1,26 @@ +# handler names are prefixed with the role name to avoid collisions with other roles. +# use chained handlers (notify) when a validation step should precede the actual action. 
+ +- name: 'example: validate config; restart example' + ansible.builtin.command: 'example --validate-config' + changed_when: false # no config change + notify: 'example: restart example' + +# skip restart if the service was just started (redundant) or if the user wants it stopped. +# __example__service_state_result is registered on a dedicated state-only task (not the enabled task), +# so that enabled-only changes don't prevent the restart. +- name: 'example: restart example' + ansible.builtin.service: + name: 'example' + state: 'restarted' + when: + - '__example__service_state_result is not changed' + - 'example__service_state != "stopped"' + +- name: 'example: reload example' + ansible.builtin.service: + name: 'example' + state: 'reloaded' + when: + - '__example__service_state_result is not changed' + - 'example__service_state != "stopped"' diff --git a/roles/example/meta/argument_specs.yml b/roles/example/meta/argument_specs.yml new file mode 100644 index 000000000..512cd94f8 --- /dev/null +++ b/roles/example/meta/argument_specs.yml @@ -0,0 +1,114 @@ +# argument_specs validates required variables and types automatically at role entry. +# use this for simple "is defined" / type checks. for complex validations +# (value ranges, cross-variable logic), use ansible.builtin.assert in the tasks. +argument_specs: + main: + options: + + example__conf_log_level__group_var: + type: 'str' + required: false + default: '' + description: 'The log level. Group-level override.' + + example__conf_log_level__host_var: + type: 'str' + required: false + default: '' + description: 'The log level. Host-level override.' + + example__conf_max_connections__group_var: + type: 'raw' + required: false + default: '' + description: 'Maximum number of concurrent connections. Group-level override.' + + example__conf_max_connections__host_var: + type: 'raw' + required: false + default: '' + description: 'Maximum number of concurrent connections. Host-level override.' 
+ + example__conf_worker_threads: + type: 'int' + required: false + description: 'Number of worker threads for request processing.' + + example__lib_version: + type: 'str' + required: false + description: 'The version of the Linuxfabrik Python Libraries to install.' + + example__listeners__group_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Network listeners. Group-level override.' + + example__listeners__host_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Network listeners. Host-level override.' + + example__logrotate: + type: 'int' + required: false + description: 'Number of days to keep rotated log files.' + + example__maintenance_cron_minute: + type: 'int' + required: false + description: 'Minute of the hour at which the maintenance cron job runs.' + + example__plugins__group_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Optional plugins to install as OS packages. Group-level override.' + + example__plugins__host_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Optional plugins to install as OS packages. Host-level override.' + + example__service_enabled: + type: 'bool' + required: false + default: true + description: 'Enables or disables the service.' + + example__service_state: + type: 'str' + required: false + default: 'started' + choices: + - 'reloaded' + - 'restarted' + - 'started' + - 'stopped' + description: 'Desired state of the example service.' + + example__users__group_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Application users to manage. Group-level override.' + + example__users__host_var: + type: 'list' + elements: 'dict' + required: false + default: [] + description: 'Application users to manage. Host-level override.' + + example__version: + type: 'str' + required: true + description: 'The version of example to install.' 
diff --git a/roles/example/tasks/RedHat.yml b/roles/example/tasks/RedHat.yml new file mode 100644 index 000000000..0653b6407 --- /dev/null +++ b/roles/example/tasks/RedHat.yml @@ -0,0 +1,9 @@ +- block: + + - name: 'Import the example RPM GPG key' + ansible.builtin.rpm_key: + key: 'https://example.com/rpm-gpg-key' + state: 'present' + + tags: + - 'example' diff --git a/roles/example/tasks/main.yml b/roles/example/tasks/main.yml new file mode 100644 index 000000000..862db70ef --- /dev/null +++ b/roles/example/tasks/main.yml @@ -0,0 +1,417 @@ +# This is the example role for linuxfabrik.lfops. +# It demonstrates the correct and consistent usage of the most common Ansible +# modules across LFOps. +# +# Anti-patterns (do NOT use these, use the alternative instead): +# - ansible.builtin.apt / ansible.builtin.dnf -> ansible.builtin.package +# - ansible.builtin.copy -> ansible.builtin.template +# - ansible.builtin.shell -> ansible.builtin.command +# - ansible.builtin.systemd -> ansible.builtin.service + + +- block: + + - name: 'Set platform/version specific variables' + ansible.builtin.import_role: + name: 'shared' + tasks_from: 'platform-variables.yml' + + tags: + - 'always' + + +# include_tasks is required here because import_tasks cannot handle dynamic variables. 
+- name: 'Perform platform/version specific tasks' + ansible.builtin.include_tasks: '{{ __task_file }}' + when: '__task_file | length > 0' + vars: + __task_file: '{{ lookup("ansible.builtin.first_found", __first_found_options) }}' + __first_found_options: + files: + - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_version"] }}.yml' + - '{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_major_version"] }}.yml' + - '{{ ansible_facts["distribution"] }}.yml' + - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_version"] }}.yml' + - '{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_major_version"] }}.yml' + - '{{ ansible_facts["os_family"] }}.yml' + paths: + - '{{ role_path }}/tasks' + skip: true + tags: + - 'always' + + +- block: + + # simple mandatory variable checks (required, type, choices) belong in meta/argument_specs.yml. + # use assert only for validations that argument_specs cannot express: + # value ranges, regex patterns, cross-variable dependencies. + - name: 'Check variable constraints' + ansible.builtin.assert: + that: + - 'example__conf_max_connections__combined_var | int >= 1' + - 'example__conf_max_connections__combined_var | int <= 10000' + quiet: true + fail_msg: 'example__conf_max_connections must be between 1 and 10000, got "{{ example__conf_max_connections__combined_var }}".' + + tags: + # use 'always' so the validation runs even when other roles reference these variables. + - 'always' + + +- block: + + - name: 'groupadd example' + ansible.builtin.group: + name: 'example' + state: 'present' + system: true + + - name: 'useradd example' + ansible.builtin.user: + name: 'example' + comment: 'Example Service' + group: 'example' + home: '/var/lib/example' + shell: '/sbin/nologin' + system: true + state: 'present' + + # always use ansible.builtin.package instead of ansible.builtin.apt or ansible.builtin.dnf. + # always use state: 'present', not 'latest'. 
+ - name: 'Install required packages' + ansible.builtin.package: + name: '{{ __example__required_packages }}' + state: 'present' + + - name: 'curl --output /tmp/example-{{ example__version }}.tar.gz https://example.com/releases/example-{{ example__version }}.tar.gz' + ansible.builtin.get_url: + url: 'https://example.com/releases/example-{{ example__version }}.tar.gz' + dest: '/tmp/example-{{ example__version }}.tar.gz' + owner: 'root' + group: 'root' + mode: 0o644 + + - name: 'tar xzf /tmp/example-{{ example__version }}.tar.gz -C /opt/example' + ansible.builtin.unarchive: + src: '/tmp/example-{{ example__version }}.tar.gz' + dest: '/opt/example' + remote_src: true + owner: 'example' + group: 'example' + creates: '/opt/example/bin/example' + + # use ansible.builtin.command instead of ansible.builtin.shell wherever possible. + # always provide changed_when or creates/removes for idempotency. + - name: 'example {{ __example__init_flag }}' + ansible.builtin.command: 'example {{ __example__init_flag }}' + args: + creates: '/var/lib/example/initialized' + + - name: 'example --check-config' + ansible.builtin.command: 'example --check-config' + changed_when: false # read-only command, never changes anything + check_mode: false # also run in check mode + + # for multi-parameter commands, use the >- folded scalar style + - name: 'example --setup' + ansible.builtin.command: >- + example --setup + --log-level '{{ example__conf_log_level__combined_var }}' + --max-connections {{ example__conf_max_connections__combined_var }} + register: '__example__setup_result' # prefix role internal variables with `__` + changed_when: '"already configured" not in __example__setup_result["stdout"]' + failed_when: '__example__setup_result["rc"] != 0 and "already configured" not in __example__setup_result["stdout"]' + + - name: 'example --generate-cache' # noqa: no-changed-when - no easy way to detect changes + ansible.builtin.command: 'example --generate-cache' + + tags: + - 'example' + + +- 
block: + + - name: 'Gather package facts' + ansible.builtin.package_facts: # yamllint disable-line rule:empty-values + check_mode: false + + - name: 'Set installed version fact' + ansible.builtin.set_fact: + __example__installed_server_version: '{{ ansible_facts["packages"]["example-server"][0]["version"] }}' + + - name: 'Load default values for {{ __example__installed_server_version }}' + ansible.builtin.include_vars: 'vars/{{ __example__installed_server_version }}.yml' + + tags: + # make sure to list all the tags that required the version-dependent variables + - 'example' + - 'example:configure' + + +- block: + + # create directory + - name: 'mkdir -p /etc/example' + ansible.builtin.file: + path: '/etc/example' + state: 'directory' + owner: 'example' + group: 'example' + mode: 0o755 + + # create symlink + - name: 'ln -s /etc/example/example.conf /etc/example/current.conf' + ansible.builtin.file: + src: '/etc/example/example.conf' + path: '/etc/example/current.conf' + state: 'link' + + # set permissions + - name: 'chmod 0600 /etc/example/secrets.conf' + ansible.builtin.file: + path: '/etc/example/secrets.conf' + owner: 'example' + group: 'example' + mode: 0o600 + + # remove file + - name: 'rm -f /etc/example/deprecated.conf' + ansible.builtin.file: + path: '/etc/example/deprecated.conf' + state: 'absent' + + # this is the standard way to load platform-specific variables (see top of this file). + # another common use: include shared tasks from the 'shared' role. + - name: 'Clone the lib repo' + ansible.builtin.import_role: + name: 'shared' + tasks_from: 'clone-lib-repo.yml' + vars: + shared__lib_version: '{{ example__lib_version }}' + when: + - 'example__lib_version is defined and example__lib_version | length > 0' + + # use a sub-block with a block-level when to avoid repeating the same condition on every task. 
+ - block: + + - name: 'semanage port --add --type example_port_t --proto tcp 8080' + community.general.seport: + ports: 8080 + proto: 'tcp' + setype: 'example_port_t' + state: 'present' + + - name: 'setsebool -P httpd_can_network_connect on' + ansible.posix.seboolean: + name: 'httpd_can_network_connect' + persistent: true + state: true + + when: + - 'ansible_facts["selinux"]["status"] != "disabled"' + + tags: + - 'example' + - 'example:configure' + + +- block: + + # always use ansible.builtin.template instead of ansible.builtin.copy, ansible.builtin.lineinfile or ansible.builtin.blockinfile. + # always set: backup, src (relative, mirroring dest path), dest, owner, group, mode. + - name: 'Deploy /etc/example/example.conf' + ansible.builtin.template: + backup: true + src: 'etc/example/example.conf.j2' + dest: '/etc/example/example.conf' + owner: 'example' + group: 'example' + mode: 0o400 # contains secrets + notify: 'example: validate config; restart example' + + # os-specific template selection using first_found + - name: 'Deploy /etc/example/platform.conf' + ansible.builtin.template: + backup: true + src: '{{ lookup("ansible.builtin.first_found", __example__platform_template_file) }}' + dest: '/etc/example/platform.conf' + owner: 'example' + group: 'example' + mode: 0o644 + vars: + __example__platform_template_file: + files: + - 'etc/example/{{ ansible_facts["distribution"] }}{{ ansible_facts["distribution_major_version"] }}-platform.conf.j2' + - 'etc/example/{{ ansible_facts["os_family"] }}{{ ansible_facts["distribution_major_version"] }}-platform.conf.j2' + paths: + - '{{ role_path }}/templates' + notify: 'example: validate config; restart example' + + # platform-specific dest path using a variable from vars/.yml + - name: 'Deploy {{ __example__sysconfig_path }}' + ansible.builtin.template: + backup: true + src: 'etc/example/sysconfig.j2' + dest: '{{ __example__sysconfig_path }}' + owner: 'root' + group: 'root' + mode: 0o644 + notify: 'example: restart example' 
+ + # remove rpmnew/rpmsave (and Debian equivalents) after deploying config files + # that might be overwritten by package updates. + - name: 'Remove rpmnew / rpmsave (and Debian equivalents)' + ansible.builtin.include_role: + name: 'shared' + tasks_from: 'remove-rpmnew-rpmsave.yml' + vars: + shared__remove_rpmnew_rpmsave_config_file: '{{ item }}' + loop: + - '/etc/example/example.conf' + - '/etc/example/platform.conf' + - '{{ __example__sysconfig_path }}' + + tags: + - 'example' + - 'example:configure' + + +- block: + + # for lists with state support, use the selectattr pattern with map(attribute="name") + # to extract just the names for modules that accept a list (like ansible.builtin.package). + - name: 'Ensure absent plugins are removed' + ansible.builtin.package: + name: '{{ example__plugins__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | map(attribute="name") }}' + state: 'absent' + + - name: 'Ensure present plugins are installed' + ansible.builtin.package: + name: '{{ (example__plugins__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | map(attribute="name")) + + (example__plugins__combined_var | selectattr("state", "undefined") | map(attribute="name")) }}' + state: 'present' + + tags: + - 'example' + - 'example:plugin' + + +- block: + + # always use ansible.builtin.service instead of ansible.builtin.systemd. + # split enabled and state into separate tasks so that the register on the state task + # only reflects actual state changes (started/stopped), not enabled/disabled changes. + # this allows the handler to correctly skip a restart when the service was just started, + # without false-positives from enabled-only changes. 
+ - name: 'systemctl {{ example__service_enabled | bool | ternary("enable", "disable") }} example.service' + ansible.builtin.service: + name: 'example.service' + enabled: '{{ example__service_enabled | bool }}' + + - name: 'systemctl {{ example__service_state }} example.service' + ansible.builtin.service: + name: 'example.service' + state: '{{ example__service_state }}' + register: '__example__service_state_result' + + tags: + - 'example' + - 'example:state' + + +- block: + + - name: 'Wait for port 8080 to become available' + ansible.builtin.wait_for: + port: 8080 + host: '127.0.0.1' + delay: 5 + timeout: 30 + state: 'started' + + # GET request with response validation + - name: 'curl http://localhost:8080/api/health' + ansible.builtin.uri: + url: 'http://localhost:8080/api/health' + method: 'GET' + return_content: true + status_code: 200 + register: '__example__api_health_result' + changed_when: false + check_mode: false + + # fail + when and assert are functionally equivalent for single conditions. + # prefer assert for consistency with the rest of the role. + - name: 'Fail if API health check failed' + ansible.builtin.assert: + that: + - '"healthy" in __example__api_health_result["content"]' + fail_msg: 'API is not healthy: {{ __example__api_health_result["content"] }}' + + tags: + - 'example' + + +- block: + + # optionally, add debug messages for combined vars. + # this makes it easier for the user to see what the role will do. 
+ - name: 'Combined Users' + ansible.builtin.debug: + var: 'example__users__combined_var' + + # DELETE request with JSON body + - name: 'Remove users via REST API' + ansible.builtin.uri: + url: 'http://127.0.0.1:8080/api/user/{{ item["name"] }}' + method: 'DELETE' + headers: + Accept: 'application/json' + status_code: + - 200 + - 404 + loop: '{{ example__users__combined_var }}' + loop_control: + label: '{{ item["name"] }}' + when: + - 'item["state"] | d("present") == "absent"' + + - name: 'Create users via REST API' + ansible.builtin.uri: + url: 'http://127.0.0.1:8080/api/user' + method: 'POST' + body: > + { + "name": "{{ item["name"] }}", + "password": "{{ item["password"] }}", + "comment": "{{ item["comment"] | d("") }}" + } + body_format: 'json' + headers: + Accept: 'application/json' + status_code: 201 + loop: '{{ example__users__combined_var }}' + loop_control: + label: '{{ item["name"] }}' + when: + - 'item["state"] | d("present") != "absent"' + + tags: + - 'example' + - 'example:user' + + +- block: + + # use flush_handlers when subsequent tasks depend on the handler having run. + - name: 'Flush handlers so that the service can be used by other roles later' + ansible.builtin.meta: 'flush_handlers' + + tags: + # make sure to list all tags which might need the flush_handlers to run. 
+ - 'example' + - 'example:configure' + - 'example:plugin' + - 'example:state' + - 'example:user' diff --git a/roles/example/templates/etc/example/Debian-platform.conf.j2 b/roles/example/templates/etc/example/Debian-platform.conf.j2 new file mode 100644 index 000000000..c82d7a315 --- /dev/null +++ b/roles/example/templates/etc/example/Debian-platform.conf.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} +# 2026041001 + +[platform] +package_manager = apt +service_manager = systemd +default_shell = /bin/bash diff --git a/roles/example/templates/etc/example/RedHat-platform.conf.j2 b/roles/example/templates/etc/example/RedHat-platform.conf.j2 new file mode 100644 index 000000000..bdb5f26ae --- /dev/null +++ b/roles/example/templates/etc/example/RedHat-platform.conf.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} +# 2026041001 + +[platform] +package_manager = dnf +service_manager = systemd +default_shell = /bin/bash diff --git a/roles/example/templates/etc/example/RedHat8-platform.conf.j2 b/roles/example/templates/etc/example/RedHat8-platform.conf.j2 new file mode 100644 index 000000000..5418066e2 --- /dev/null +++ b/roles/example/templates/etc/example/RedHat8-platform.conf.j2 @@ -0,0 +1,8 @@ +# {{ ansible_managed }} +# 2026041001 + +[platform] +package_manager = dnf +service_manager = systemd +default_shell = /bin/bash +legacy_crypto = true diff --git a/roles/example/templates/etc/example/example.conf.j2 b/roles/example/templates/etc/example/example.conf.j2 new file mode 100644 index 000000000..8211365ed --- /dev/null +++ b/roles/example/templates/etc/example/example.conf.j2 @@ -0,0 +1,23 @@ +# {{ ansible_managed }} +# 2026041201 + +[server] +log_level = {{ example__conf_log_level__combined_var }} +max_connections = {{ example__conf_max_connections__combined_var }} +worker_threads = {{ example__conf_worker_threads }} +{% if __example__installed_server_version is ansible.builtin.version('2.0.0', '>=') %} +data_format = v2 +{% endif %} + +[plugins] +{% for item in 
example__plugins__combined_var if item['state'] | d('present') != 'absent' %} +load = {{ item['name'] }} +{% endfor %} + +[listeners] +{% for item in example__listeners__combined_var if item['state'] | d('present') != 'absent' %} +listen = {{ item['name'] }}:{{ item['port'] }}{% if item['ssl'] | d(false) | bool %} ssl{% endif %} +{% endfor %} + +[maintenance] +cron_minute = {{ example__maintenance_cron_minute }} diff --git a/roles/example/templates/etc/example/sysconfig.j2 b/roles/example/templates/etc/example/sysconfig.j2 new file mode 100644 index 000000000..2ff854f22 --- /dev/null +++ b/roles/example/templates/etc/example/sysconfig.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} +# 2026041001 + +EXAMPLE_HEAP_SIZE="512m" +EXAMPLE_LOG_ROTATE={{ example__logrotate }} diff --git a/roles/example/vars/1.0.0.yml b/roles/example/vars/1.0.0.yml new file mode 100644 index 000000000..aedac78f3 --- /dev/null +++ b/roles/example/vars/1.0.0.yml @@ -0,0 +1,8 @@ +# only use `__` prefixed (role-internal) vars or `__role_var` suffixed vars in here. +# vars/ has higher precedence than the inventory, so user-facing vars placed here +# cannot be overwritten by group_vars or host_vars. + +__example__conf_worker_threads: 4 + +# internal only, not exposed in defaults — the user must not override this +__example__init_flag: '--init' diff --git a/roles/example/vars/2.0.0.yml b/roles/example/vars/2.0.0.yml new file mode 100644 index 000000000..f9bc26bf5 --- /dev/null +++ b/roles/example/vars/2.0.0.yml @@ -0,0 +1,8 @@ +# only use `__` prefixed (role-internal) vars or `__role_var` suffixed vars in here. +# vars/ has higher precedence than the inventory, so user-facing vars placed here +# cannot be overwritten by group_vars or host_vars. 
+ +__example__conf_worker_threads: 8 + +# internal only, not exposed in defaults — the user must not override this +__example__init_flag: '--initialize' diff --git a/roles/example/vars/Debian.yml b/roles/example/vars/Debian.yml new file mode 100644 index 000000000..5c70c9c2a --- /dev/null +++ b/roles/example/vars/Debian.yml @@ -0,0 +1,4 @@ +__example__required_packages: + - 'example-server' + - 'example-tools' +__example__sysconfig_path: '/etc/default/example' diff --git a/roles/example/vars/RedHat.yml b/roles/example/vars/RedHat.yml new file mode 100644 index 000000000..7e3404b9f --- /dev/null +++ b/roles/example/vars/RedHat.yml @@ -0,0 +1,4 @@ +__example__required_packages: + - 'example-server' + - 'example-utils' +__example__sysconfig_path: '/etc/sysconfig/example' diff --git a/roles/exoscale_vm/README.md b/roles/exoscale_vm/README.md index d51d17343..7232e8547 100644 --- a/roles/exoscale_vm/README.md +++ b/roles/exoscale_vm/README.md @@ -17,23 +17,53 @@ This role creates and manages instances (virtual machines) on [Exoscale](https:/ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `exoscale_vm` | Creates and manages the instance | - | -| `exoscale_vm:firewalls` | Manage the provider firewalls of the host. | - | -| `exoscale_vm:networks` | Manage the provider private networks. | - | +`exoscale_vm` + +* Creates and manages the instance. +* Triggers: none. + +`exoscale_vm:firewalls` + +* Manage the provider firewalls of the host. +* Triggers: none. + +`exoscale_vm:networks` + +* Manage the provider private networks. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `exoscale_vm__account` | The name of the Exoscale account name as configured during `exo config`. Can be found in `~/.config/exoscale/exoscale.toml` afterwards. | -| `exoscale_vm__api_key` | Set the Exoscale API key. API keys can be managed [here](https://portal.exoscale.com/iam/api-keys). 
We recommend creating a unrestricted key, because else some operations fail. | -`exoscale_vm__api_secret` | Set the Exoscale secret corresponding to the API key. | -`exoscale_vm__service_offering` | The Exoscale service offering. This defines the amount of CPU cores, RAM and disk space. The possible options can be obtained using `exo compute instance-type list --verbose`. Note that these changes will only be applied to stopped instances. | -`exoscale_vm__template` | The Exoscale template for the instance. The possible options can be obtained using `exo compute instance-template list`. Note that you have to use the ID instead of the name when referencing custom templates. | -`exoscale_vm__zone` | The Exoscale zone the instance should be in. The possible options can be obtained using `exo zone list`. | +`exoscale_vm__account` + +* The name of the Exoscale account as configured during `exo config`. Can be found in `~/.config/exoscale/exoscale.toml` afterwards. +* Type: String. + +`exoscale_vm__api_key` + +* Set the Exoscale API key. API keys can be managed [here](https://portal.exoscale.com/iam/api-keys). We recommend creating an unrestricted key, because otherwise some operations fail. +* Type: String. + +`exoscale_vm__api_secret` + +* Set the Exoscale secret corresponding to the API key. +* Type: String. + +`exoscale_vm__service_offering` + +* The Exoscale service offering. This defines the amount of CPU cores, RAM and disk space. The possible options can be obtained using `exo compute instance-type list --verbose`. Note that these changes will only be applied to stopped instances. +* Type: String. + +`exoscale_vm__template` + +* The Exoscale template for the instance. The possible options can be obtained using `exo compute instance-template list`. Note that you have to use the ID instead of the name when referencing custom templates. +* Type: String. + +`exoscale_vm__zone` + +* The Exoscale zone the instance should be in. 
The possible options can be obtained using `exo zone list`. +* Type: String. Example: ```yaml @@ -49,16 +79,103 @@ exoscale_vm__zone: 'ch-dk-2' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `exoscale_vm__disk_size` | The disk size in GBs. Must be greater than 10. Note that adjusting the disk size is not currently supported. | `10` | -| `exoscale_vm__name` | The name of the instance. By default, the Ansible inventory name prefixed with `e` is used, as it has to start with a letter. | `'e{{ inventory_hostname }}'` | -| `exoscale_vm__private_instance` | Boolean to choose if the instance should be "private" without a public IP, or not. | `false` | -| `exoscale_vm__private_networks` | A list of dictionaries defining which networks should be attached to this instance. It also allows the creation of new internal networks, or setting a fixed IP for the instance. Subkeys:
  • `name`: Mandatory, string. The name of an existing network, or the network which should be created.
  • `cidr`: Optional, string. If this is given, a new network with this cidr is created.
  • `fixed_ip`: Optional, string. The fixed IP of this instance. This can be used for attach to an existing network, or when creating a new one.
| `[]` | -| `exoscale_vm__security_group_rules` | A list of dictionaries containing rules for the security group (basically Exoscale firewall rules). Subkeys:
  • `cidr`: Optional, string. CIDR to be used for security group rule.
  • `protocol`: Mandatory, string. To which IP protocol the rule is applied. Possible options: `tcp`, `udp`, `icmp`.
  • `start_port`: Mandatory, int. The starting port.
  • `end_port`: Mandatory, int. The ending port.
  • `state`: Optional, string. State of the rule. Either `absent` or `present`. Defaults to `present`.
  • `type`: Mandatory, string. For which direction the rule should apply. Possible options: `ingress`, `egress`.
| unset | -| `exoscale_vm__ssh_key` | The name of the SSH-key depoited in Exoscale [here](https://portal.exoscale.com/compute/keypairs). Defaults to using the local username of the Ansible control node. | `'{{ lookup("env", "USER") }}'` | -| `exoscale_vm__state` | The state of the instance. Possible options:
  • deployed
  • started
  • stopped
  • restarted
  • restored
  • destroyed
  • expunged
  • present
  • absent
| `'started'` | -| `exoscale_vm__template_visibility` | Visibility of the Exoscale template for the instance. Usually `'private'` for custom templates. | `'public'` | +`exoscale_vm__disk_size` + +* The disk size in GBs. Must be greater than 10. Note that adjusting the disk size is not currently supported. +* Type: Number. +* Default: `10` + +`exoscale_vm__name` + +* The name of the instance. By default, the Ansible inventory name prefixed with `e` is used, as it has to start with a letter. +* Type: String. +* Default: `'e{{ inventory_hostname }}'` + +`exoscale_vm__private_instance` + +* Choose if the instance should be "private" without a public IP, or not. +* Type: Bool. +* Default: `true` + +`exoscale_vm__private_networks` + +* A list of dictionaries defining which networks should be attached to this instance. It also allows the creation of new internal networks, or setting a fixed IP for the instance. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `name`: + + * Mandatory. The name of an existing network, or the network which should be created. + * Type: String. + + * `cidr`: + + * Optional. If this is given, a new network with this cidr is created. + * Type: String. + + * `fixed_ip`: + + * Optional. The fixed IP of this instance. This can be used for attach to an existing network, or when creating a new one. + * Type: String. + +`exoscale_vm__security_group_rules` + +* A list of dictionaries containing rules for the security group (basically Exoscale firewall rules). +* Type: List of dictionaries. +* Default: unset + +* Subkeys: + + * `cidr`: + + * Optional. CIDR to be used for security group rule. + * Type: String. + + * `protocol`: + + * Mandatory. To which IP protocol the rule is applied. Possible options: `tcp`, `udp`, `icmp`. + * Type: String. + + * `start_port`: + + * Mandatory. The starting port. + * Type: Number. + + * `end_port`: + + * Mandatory. The ending port. + * Type: Number. + + * `state`: + + * Optional. State of the rule. 
Either `absent` or `present`. + * Type: String. + * Default: `'present'` + + * `type`: + + * Mandatory. For which direction the rule should apply. Possible options: `ingress`, `egress`. + * Type: String. + +`exoscale_vm__ssh_key` + +* The name of the SSH-key deposited in Exoscale [here](https://portal.exoscale.com/compute/keypairs). Defaults to using the local username of the Ansible control node. +* Type: String. +* Default: `'{{ lookup("env", "USER") }}'` + +`exoscale_vm__state` + +* The state of the instance. Possible options: `deployed`, `started`, `stopped`, `restarted`, `restored`, `destroyed`, `expunged`, `present`, `absent`. +* Type: String. +* Default: `'started'` + +`exoscale_vm__template_visibility` + +* Visibility of the Exoscale template for the instance. Usually `'private'` for custom templates. +* Type: String. +* Default: `'public'` Example: ```yaml diff --git a/roles/exoscale_vm/tasks/main.yml b/roles/exoscale_vm/tasks/main.yml index 427507335..a28ae86f8 100644 --- a/roles/exoscale_vm/tasks/main.yml +++ b/roles/exoscale_vm/tasks/main.yml @@ -27,7 +27,7 @@ loop: '{{ exoscale_vm__security_group_rules }}' delegate_to: 'localhost' when: - - 'exoscale_vm__security_group_rules is defined' + - 'exoscale_vm__security_group_rules is defined and exoscale_vm__security_group_rules | length > 0' - 'exoscale_vm__state != "absent"' tags: diff --git a/roles/fail2ban/README.md b/roles/fail2ban/README.md index 72eb6d02f..d10033dd0 100644 --- a/roles/fail2ban/README.md +++ b/roles/fail2ban/README.md @@ -16,28 +16,102 @@ This role provides two additional filters: If you use the ["Fail2Ban" Playbook](https://github.com/Linuxfabrik/lfops/blob/main/playbooks/fail2ban.yml), this is automatically done for you. 
+ ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `fail2ban` | Installs and configures fail2ban | Restarts fail2ban.service | -| `fail2ban:state` | Manages the state of the fail2ban service | - | +`fail2ban` + +* Installs and configures fail2ban. +* Triggers: fail2ban.service restart. + +`fail2ban:state` + +* Manages the state of the fail2ban service. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `fail2ban__jail_default_action` | The default action. This will be used in all jails which do not overwrite it. | `fail2ban__jail_default_banaction` | -| `fail2ban__jail_default_banaction` | The default banaction, which will be executed as defined in `fail2ban__jail_default_action` (assuming the jail does not overwrite it). | `'iptables-multiport'` | -| `fail2ban__jail_default_ignoreip` | List of IP addresses (in CIDR notation) that will be ignored from all jails (assuming the jail does not overwrite it). | `[]` | -| `fail2ban__jail_default_rocketchat_hook` | The incoming Rocket.Chat hook which will be used to send a notification on bans. For this to work `rocketchat` has to be in the action, have a look at `fail2ban__jail_default_action` (example below). | `''` | -| `fail2ban__jail_portscan_allowed_ports` | A list of ports which are allowed to be accessed. IPs accessing these ports will not be blocked. Note: This setting is for the portscan jail. | `[22]` | -| `fail2ban__jail_portscan_bantime` | The ban duration for the portscan jail. | `'8h'` | -| `fail2ban__jail_portscan_server_ips` | A list of IP addresses of the server. Only traffic destined for these IPs will be considered. This prevents accidental banning due to traffic which is passing by the server, but not destined for it. Note: This setting is for the portscan jail. 
| `'{{ ansible_facts["all_ipv4_addresses"] }}'` | -| `fail2ban__jail_sshd_bantime` | The ban duration for the sshd jail. | `'7d'` | -| `fail2ban__jails__group_var` / `fail2ban__jails__host_var` | The fail2ban jail definition. Subkeys:
  • `template`: Mandatory, string. Name of the Jinja template source file to use. Have a look at the possible options [here](https://github.com/Linuxfabrik/lfops/tree/main/roles/fail2ban/templates/etc/fail2ban/jail.d), or `raw`.
  • `filename`: Mandatory, string. Destination filename in `jail.d/`, and normally is equal to the name of the source `template` used. Will be suffixed with `.conf`.
  • `state`: Mandatory, string. State of the jail. Possible options: `absent`, `present`.
  • `raw`: Optional, string. Raw content for the jail.

For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). |
  • `z10-portscan`
  • `z10-sshd`
| -| `fail2ban__service_enabled` | Enables or disables the fail2ban service, analogous to `systemctl enable/disable --now`. Possible options: | `true` | +`fail2ban__jail_default_action` + +* The default action. This will be used in all jails which do not overwrite it. +* Type: String. +* Default: `fail2ban__jail_default_banaction` + +`fail2ban__jail_default_banaction` + +* The default banaction, which will be executed as defined in `fail2ban__jail_default_action` (assuming the jail does not overwrite it). +* Type: String. +* Default: `'iptables-multiport'` + +`fail2ban__jail_default_ignoreip` + +* List of IP addresses (in CIDR notation) that will be ignored from all jails (assuming the jail does not overwrite it). +* Type: List. +* Default: `[]` + +`fail2ban__jail_default_rocketchat_hook` + +* The incoming Rocket.Chat hook which will be used to send a notification on bans. For this to work `rocketchat` has to be in the action, have a look at `fail2ban__jail_default_action` (example below). +* Type: String. +* Default: `''` + +`fail2ban__jail_portscan_allowed_ports` + +* A list of ports which are allowed to be accessed. IPs accessing these ports will not be blocked. Note: This setting is for the portscan jail. +* Type: List. +* Default: `[22]` + +`fail2ban__jail_portscan_bantime` + +* The ban duration for the portscan jail. +* Type: String. +* Default: `'8h'` + +`fail2ban__jail_portscan_server_ips` + +* A list of IP addresses of the server. Only traffic destined for these IPs will be considered. This prevents accidental banning due to traffic which is passing by the server, but not destined for it. Note: This setting is for the portscan jail. +* Type: List. +* Default: `'{{ ansible_facts["all_ipv4_addresses"] }}'` + +`fail2ban__jail_sshd_bantime` + +* The ban duration for the sshd jail. +* Type: String. +* Default: `'7d'` + +`fail2ban__jails__group_var` / `fail2ban__jails__host_var` + +* The fail2ban jail definition. 
For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `z10-portscan`, `z10-sshd` +* Subkeys: + + * `template`: + + * Mandatory. Name of the Jinja template source file to use. Have a look at the possible options [here](https://github.com/Linuxfabrik/lfops/tree/main/roles/fail2ban/templates/etc/fail2ban/jail.d), or `raw`. + * Type: String. + + * `filename`: + + * Mandatory. Destination filename in `jail.d/`, and normally is equal to the name of the source `template` used. Will be suffixed with `.conf`. + * Type: String. + + * `state`: + + * Mandatory. State of the jail. Possible options: `absent`, `present`. + * Type: String. + + * `raw`: + + * Optional. Raw content for the jail. + * Type: String. + +`fail2ban__service_enabled` + +* Enables or disables the fail2ban service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` Example: ```yaml diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml index ca1ce4273..97b01c1d3 100644 --- a/roles/fail2ban/tasks/main.yml +++ b/roles/fail2ban/tasks/main.yml @@ -26,6 +26,7 @@ - name: 'Deploy actions' ansible.builtin.template: + backup: true src: 'etc/fail2ban/action.d/{{ item }}.conf.j2' dest: '/etc/fail2ban/action.d/{{ item }}.conf' owner: 'root' @@ -38,6 +39,7 @@ - name: 'Deploy filters' ansible.builtin.template: + backup: true src: 'etc/fail2ban/filter.d/{{ item }}.conf.j2' dest: '/etc/fail2ban/filter.d/{{ item }}.conf' owner: 'root' @@ -51,6 +53,7 @@ - name: 'Deploy /etc/fail2ban/jail.d/z00-defaults.conf' ansible.builtin.template: + backup: true src: 'etc/fail2ban/jail.d/z00-defaults.conf.j2' dest: '/etc/fail2ban/jail.d/z00-defaults.conf' owner: 'root' @@ -75,6 +78,7 @@ - name: 'Create Jails' ansible.builtin.template: + backup: true src: 'etc/fail2ban/jail.d/z10-{{ item["template"] }}.conf.j2' dest: '/etc/fail2ban/jail.d/{{ item["filename"] }}.conf' owner: 'root' diff --git 
a/roles/fangfrisch/README.md b/roles/fangfrisch/README.md index 89d1f2215..b522e47e3 100644 --- a/roles/fangfrisch/README.md +++ b/roles/fangfrisch/README.md @@ -12,18 +12,30 @@ If you use the [Fangfrisch Playbook](https://github.com/Linuxfabrik/lfops/blob/m ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `fangfrisch` | Installs and configures Fangfrisch | - | -| `fangfrisch:state` | Manages the state of the Fangfrisch timer | - | +`fangfrisch` + +* Installs and configures Fangfrisch. +* Triggers: none. + +`fangfrisch:state` + +* Manages the state of the Fangfrisch timer. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `fangfrisch__securiteinfo_customer_id` | Set this to enable downloading signatures from [SecuriteInfo](https://www.securiteinfo.com/). Requires an [SecuriteInfo account](https://www.securiteinfo.com/clients/customers/account). | unset | -| `fangfrisch__timer_enabled` | Enables or disables the hourly fangfrisch timer to automatically update the signatures, analogous to `systemctl enable/disable`. | `true` | +`fangfrisch__securiteinfo_customer_id` + +* Set this to enable downloading signatures from [SecuriteInfo](https://www.securiteinfo.com/). Requires a [SecuriteInfo account](https://www.securiteinfo.com/clients/customers/account). +* Type: String. +* Default: unset + +`fangfrisch__timer_enabled` + +* Enables or disables the hourly fangfrisch timer to automatically update the signatures, analogous to `systemctl enable/disable`. +* Type: Bool. 
+* Default: `true` Example: ```yaml diff --git a/roles/fangfrisch/tasks/main.yml b/roles/fangfrisch/tasks/main.yml index 392b64e01..ad7c66d75 100644 --- a/roles/fangfrisch/tasks/main.yml +++ b/roles/fangfrisch/tasks/main.yml @@ -10,6 +10,7 @@ - name: 'Deploy /etc/fangfrisch.conf' ansible.builtin.template: + backup: true src: 'etc/fangfrisch.conf.j2' dest: '/etc/fangfrisch.conf' owner: 'clamupdate' @@ -23,6 +24,7 @@ - name: 'Deploy /etc/systemd/system/fangfrisch.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/fangfrisch.service.j2' dest: '/etc/systemd/system/fangfrisch.service' owner: 'root' @@ -31,6 +33,7 @@ - name: 'Deploy /etc/systemd/system/fangfrisch.timer' ansible.builtin.template: + backup: true src: 'etc/systemd/system/fangfrisch.timer.j2' dest: '/etc/systemd/system/fangfrisch.timer' owner: 'root' diff --git a/roles/files/README.md b/roles/files/README.md index 016592b40..9c01f6943 100644 --- a/roles/files/README.md +++ b/roles/files/README.md @@ -10,18 +10,140 @@ This role manages file system entities such as files, directories and symlinks. ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `files` | Manages files, directories and symlinks. | - | +`files` + +* Manages files, directories and symlinks. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `files__directories__host_var` /
`files__directories__group_var` | List of dictionaries containing the directories to manage. Subkeys:
  • `path`: Mandatory, string. Path to the directory.
  • `state`: Optional, string. State of the directory, one of `present`, `absent`. Defaults to `present`. Note: both operations are recursive.
  • `mode`: Optional, string. Mode (permissions) of the directory. Defaults to `0o755`.
  • `owner`: Optional, string. Owner of the directory. Defaults to `root`.
  • `group`: Optional, string. Group of the directory. Defaults to `root`.
| `[]` | -| `files__files__host_var` /
`files__files__group_var` | List of dictionaries containing the files to manage. Subkeys:
  • `path`: Mandatory, string. Path to the file.
  • `content`: Optional, string. Content of the file. If unset, the role copies the file from `inventory_dir ~ "/host_files/" ~ inventory_hostname ~ "/" ~ item["path"]`.
  • `state`: Optional, string. State of the file, one of `present`, `absent`. Defaults to `present`.
  • `mode`: Optional, string. Mode (permissions) of the file. Defaults to `0o644`.
  • `owner`: Optional, string. Owner of the file. Defaults to `root`.
  • `group`: Optional, string. Group of the file. Defaults to `root`.
  • `template`: Optional, boolean. Whether to process the file as a Jinja template. Note: only works if `content` is unset. Defaults to `false`.
| `[]` | -| `files__symlinks__host_var` /
`files__symlinks__group_var` | List of dictionaries containing the symlinks to manage. Subkeys:
  • `src`: Mandatory, string. Path to source of the symlink.
  • `dest`: Mandatory, string. Path to dest of the symlink.
  • `state`: Optional, string. State of the symlink, one of `present`, `absent`. Defaults to `present`.
  • `mode`: Optional, string. Mode (permissions) of the symlink. Defaults to `0o644`.
  • `owner`: Optional, string. Owner of the symlink. Defaults to `root`.
  • `group`: Optional, string. Group of the symlink. Defaults to `root`.
| `[]` | +`files__directories__host_var` / `files__directories__group_var` + +* List of dictionaries containing the directories to manage. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `path`: + + * Mandatory. Path to the directory. + * Type: String. + + * `state`: + + * Optional. State of the directory, one of `present`, `absent`. Note: both operations are recursive. + * Type: String. + * Default: `'present'` + + * `mode`: + + * Optional. Mode (permissions) of the directory. + * Type: String. + * Default: `0o755` + + * `owner`: + + * Optional. Owner of the directory. + * Type: String. + * Default: `'root'` + + * `group`: + + * Optional. Group of the directory. + * Type: String. + * Default: `'root'` + +`files__files__host_var` / `files__files__group_var` + +* List of dictionaries containing the files to manage. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `path`: + + * Mandatory. Path to the file. + * Type: String. + + * `content`: + + * Optional. Content of the file. If unset, the role copies the file from `inventory_dir ~ "/host_files/" ~ inventory_hostname ~ "/" ~ item["path"]`. + * Type: String. + + * `state`: + + * Optional. State of the file, one of `present`, `absent`. + * Type: String. + * Default: `'present'` + + * `mode`: + + * Optional. Mode (permissions) of the file. + * Type: String. + * Default: `0o644` + + * `owner`: + + * Optional. Owner of the file. + * Type: String. + * Default: `'root'` + + * `group`: + + * Optional. Group of the file. + * Type: String. + * Default: `'root'` + + * `template`: + + * Optional. Whether to process file as Jinja template. Note: only works if `content` is unset. + * Type: Bool. + * Default: `false` + +`files__symlinks__host_var` / `files__symlinks__group_var` + +* List of dictionaries the symlinks to manage. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `src`: + + * Mandatory. Path to source of the symlink. + * Type: String. 
+ + * `dest`: + + * Mandatory. Path to dest of the symlink. + * Type: String. + + * `state`: + + * Optional. State of the symlink, one of `present`, `absent`. + * Type: String. + * Default: `'present'` + + * `mode`: + + * Optional. Mode (permissions) of the directory. + * Type: String. + * Default: `0o644` + + * `owner`: + + * Optional. Owner of the directory. + * Type: String. + * Default: `'root'` + + * `group`: + + * Optional. Group of the directory. + * Type: String. + * Default: `'root'` Example: ```yaml diff --git a/roles/firewall/README.md b/roles/firewall/README.md index e28c97b8f..fa20f6592 100644 --- a/roles/firewall/README.md +++ b/roles/firewall/README.md @@ -15,23 +15,79 @@ This role configures a firewall on the system. For the currently supported firew ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `firewall` | Configures a firewall on the system | Stops and masks:
  • firewalld.service
  • fwb.service
  • iptables.service
  • nftables.service
  • ufw.service
Starts and unmasks the firewall which is defined in `firewall__firewall` | -| `firewall:deploy_fwb_sh` | Deploys the `/etc/fwb.sh` file for Firewall Builder | Restarts fwb.service | -| `firewall:firewalld` | Manages firewalld | Reloads firewalld.service | +`firewall` + +* Configures a firewall on the system. +* Triggers: Stops and masks firewalld.service, fwb.service, iptables.service, nftables.service, ufw.service. Starts and unmasks the firewall which is defined in `firewall__firewall`. + +`firewall:deploy_fwb_sh` + +* Deploys the `/etc/fwb.sh` file for Firewall Builder. +* Triggers: fwb.service restart. + +`firewall:firewalld` + +* Manages firewalld. +* Triggers: firewalld.service reload. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `firewall__firewall` | Which firewall should be activated and configured. All other firewalls will be disabled. Possible options:
  • `'None'`
  • `'firewalld'`
  • `'fwbuilder'`
  • `'iptables'`
  • `'nftables'`
  • `'ufw'`
| `'fwbuilder'` | -| `firewall__firewalld_ports__group_var` /
`firewall__firewalld_ports__host_var` | List of dictionaries defining the FirewallD ports. Subkeys:
  • `port`: Mandatory, string. Port or port range.
  • `state`: Optional, string. State of the port. Either `enabled` or `disabled`. Defaults to `enabled`.
| `[]` | -| `firewall__firewalld_services__group_var` /
`firewall__firewalld_services__host_var` | List of dictionaries defining the FirewallD services. Subkeys:
  • `service`: Mandatory, string. Name of the service.
  • `state`: Optional, string. State of the service. Either `enabled` or `disabled`. Defaults to `enabled`.
| `[]` | -| `firewall__fwbuilder_fw_file` | The name of the Firewall Builder file which will be created when compiling the firewall in Firewall Builder. Needed if ``firewall__fwbuilder_repo_url`` is used and if the Firewall name within Firewall Builder differs from ``{{ inventory_hostname }}`` | `{{ inventory_hostname }}` | -| `firewall__fwbuilder_repo_url` | The GIT repository URL to clone the compiled firewall files from. | `unset` | +`firewall__firewall` + +* Which firewall should be activated and configured. All other firewalls will be disabled. Possible options: `'None'`, `'firewalld'`, `'fwbuilder'`, `'iptables'`, `'nftables'`, `'ufw'`. +* Type: String. +* Default: `'fwbuilder'` + +`firewall__firewalld_ports__group_var` / `firewall__firewalld_ports__host_var` + +* List of dictionaries defining the FirewallD ports. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `port`: + + * Mandatory. Port or port range. + * Type: String. + + * `state`: + + * Optional. State of the port. Either `enabled` or `disabled`. + * Type: String. + * Default: `'enabled'` + +`firewall__firewalld_services__group_var` / `firewall__firewalld_services__host_var` + +* List of dictionaries defining the FirewallD services. +* Type: List of dictionaries. +* Default: `[]` + +* Subkeys: + + * `service`: + + * Mandatory. Name of the service. + * Type: String. + + * `state`: + + * Optional. State of the service. Either `enabled` or `disabled`. + * Type: String. + * Default: `'enabled'` + +`firewall__fwbuilder_fw_file` + +* The name of the Firewall Builder file which will be created when compiling the firewall in Firewall Builder. Needed if ``firewall__fwbuilder_repo_url`` is used and if the Firewall name within Firewall Builder differs from ``{{ inventory_hostname }}``. +* Type: String. +* Default: `'{{ inventory_hostname }}'` + +`firewall__fwbuilder_repo_url` +* The GIT repository URL to clone the compiled firewall files from. +* Type: String. 
+* Default: unset Example: ```yaml diff --git a/roles/firewall/tasks/main.yml b/roles/firewall/tasks/main.yml index 8863e0e9b..137bc1e23 100644 --- a/roles/firewall/tasks/main.yml +++ b/roles/firewall/tasks/main.yml @@ -171,6 +171,7 @@ - name: 'Deploy fwbuilder systemd service file' ansible.builtin.template: + backup: true src: 'etc/systemd/system/fwb.service.j2' dest: '/etc/systemd/system/fwb.service' owner: 'root' diff --git a/roles/freeipa_client/README.md b/roles/freeipa_client/README.md index 90e50866f..6f6ed213f 100644 --- a/roles/freeipa_client/README.md +++ b/roles/freeipa_client/README.md @@ -10,17 +10,45 @@ This role installs and configures [FreeIPA](https://www.freeipa.org/) as a clien ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `freeipa_client` | Installs and configures FreeIPA as a client | - | +`freeipa_client` + +* Installs and configures FreeIPA as a client. +* Triggers: none. ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `freeipa_client__create_home_dir` | Defines if PAM will be configured to create a users home directory if it does not exist. | `true` | -| `freeipa_client__ipa_admin_user` | The IPA admin user / Kerberos admin principal. | `{'username': 'admin', 'password': freeipa_server__ipa_admin_password}` | +`freeipa_client__create_home_dir` + +* Defines if PAM will be configured to create a users home directory if it does not exist. +* Type: Bool. +* Default: `true` + +`freeipa_client__ipa_admin_user` + +* The IPA admin user / Kerberos admin principal. +* Type: Dictionary. +* Default: + +```yaml +freeipa_client__ipa_admin_user: + username: 'admin' + password: '{{ freeipa_server__ipa_admin_password }}' +``` + +* Subkeys: + + * `username`: + + * Optional. The admin username. + * Type: String. + * Default: `'admin'` + + * `password`: + + * Optional. The admin password. + * Type: String. 
+ * Default: `'{{ freeipa_server__ipa_admin_password }}'` Example: ```yaml diff --git a/roles/freeipa_server/README.md b/roles/freeipa_server/README.md index 7b5ef9ebe..4d948ea07 100644 --- a/roles/freeipa_server/README.md +++ b/roles/freeipa_server/README.md @@ -22,17 +22,68 @@ Ideally, the FreeIPA should be installed on a separate server. If that is not po ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `freeipa_server` | Installs and configures FreeIPA as a server | - | -| `freeipa_server:systemd_override` | Deploys `/etc/systemd/system/pki-tomcatd@.service.d/override.conf` | - | +`freeipa_server` + +* Installs and configures FreeIPA as a server. +* Triggers: none. + +`freeipa_server:configure` + +* Manages all FreeIPA resources (everything except installation). +* Triggers: none. + +`freeipa_server:group` + +* Manages FreeIPA groups. +* Triggers: none. + +`freeipa_server:hbacrule` + +* Manages FreeIPA HBAC rules. +* Triggers: none. + +`freeipa_server:hostgroup` + +* Manages FreeIPA host groups. +* Triggers: none. + +`freeipa_server:pwpolicy` + +* Manages FreeIPA password policies. +* Triggers: none. + +`freeipa_server:sudocmd` + +* Manages FreeIPA sudo commands and sudo command groups. +* Triggers: none. + +`freeipa_server:sudorule` + +* Manages FreeIPA sudo rules. +* Triggers: none. + +`freeipa_server:systemd_override` + +* Deploys `/etc/systemd/system/pki-tomcatd@.service.d/override.conf`. +* Triggers: none. + +`freeipa_server:user` + +* Manages FreeIPA users and their group memberships. +* Triggers: none. + ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `freeipa_server__directory_manager_password` | The password for the Directory Manager. This is the superuser that needs to be used to perform rare low level tasks. | -| `freeipa_server__ipa_admin_password` | The password for the FreeIPA admin. This user is a regular system account used for IPA server administration. 
Set this in the `group_vars` so that the `linuxfabrik.lfops.freeipa_client` role can use it. | +`freeipa_server__directory_manager_password` + +* The password for the Directory Manager. This is the superuser that needs to be used to perform rare low level tasks. +* Type: String. + +`freeipa_server__ipa_admin_password` + +* The password for the FreeIPA admin. This user is a regular system account used for IPA server administration. Set this in the `group_vars` so that the `linuxfabrik.lfops.freeipa_client` role can use it. +* Type: String. Example: ```yaml @@ -44,12 +95,450 @@ freeipa_server__ipa_admin_password: 'linuxfabrik' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `freeipa_server__config_default_shell` | The default shell for the users in FreeIPA. | `'/bin/bash'` | -| `freeipa_server__config_password_expiration_notification` | When the password expiration notification for FreeIPA users should be sent, in days. | `10` | -| `freeipa_server__domain` | The primary DNS domain. Typically this should be the domain part of FQDN of the server. | `'{{ ansible_facts["domain"] \| lower }}'` | -| `freeipa_server__realm` | The kerberos protocol requires a Realm name to be defined. This is typically the domain name converted to uppercase. | `'{{ ansible_facts["domain"] \| upper }}'` | +`freeipa_server__config_default_shell` + +* The default shell for the users in FreeIPA. +* Type: String. +* Default: `'/bin/bash'` + +`freeipa_server__config_password_expiration_notification` + +* When the password expiration notification for FreeIPA users should be sent, in days. +* Type: Number. +* Default: `10` + +`freeipa_server__domain` + +* The primary DNS domain. Typically this should be the domain part of FQDN of the server. +* Type: String. +* Default: `'{{ ansible_facts["domain"] | lower }}'` + +`freeipa_server__groups__host_var` / `freeipa_server__groups__group_var` + +* FreeIPA groups to manage. 
+* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the group. + * Type: String. + + * `description`: + + * Optional. Group description. + * Type: String. + + * `gidnumber`: + + * Optional. GID number. + * Type: Number. + + * `nonposix`: + + * Optional. Create as a non-POSIX group. + * Type: Bool. + + * `external`: + + * Optional. Allow external non-IPA members. + * Type: Bool. + + * `state`: + + * Optional. `present` or `absent`. Defaults to `present`. + * Type: String. + +`freeipa_server__hbacrules__host_var` / `freeipa_server__hbacrules__group_var` + +* FreeIPA HBAC rules to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the HBAC rule. + * Type: String. + + * `description`: + + * Optional. Rule description. + * Type: String. + + * `usercategory`: + + * Optional. User category (`all`). + * Type: String. + + * `hostcategory`: + + * Optional. Host category (`all`). + * Type: String. + + * `servicecategory`: + + * Optional. Service category (`all`). + * Type: String. + + * `users`: + + * Optional. List of user names. + * Type: List. + + * `groups`: + + * Optional. List of group names. + * Type: List. + + * `hosts`: + + * Optional. List of host names. + * Type: List. + + * `hostgroups`: + + * Optional. List of host group names. + * Type: List. + + * `hbacsvcs`: + + * Optional. List of HBAC service names. + * Type: List. + + * `hbacsvcgroups`: + + * Optional. List of HBAC service group names. + * Type: List. + + * `state`: + + * Optional. `enabled`, `disabled` or `absent`. Defaults to `enabled`. + * Type: String. + +`freeipa_server__hostgroups__host_var` / `freeipa_server__hostgroups__group_var` + +* FreeIPA host groups to manage. 
+* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the host group. + * Type: String. + + * `description`: + + * Optional. Host group description. + * Type: String. + + * `hosts`: + + * Optional. List of host names to add as members. + * Type: List. + + * `hostgroups`: + + * Optional. List of host group names to add as members. + * Type: List. + + * `state`: + + * Optional. `present` or `absent`. Defaults to `present`. + * Type: String. + +`freeipa_server__ipa_admin_principal` + +* The Kerberos principal used for IPA admin authentication. +* Type: String. +* Default: `'admin'` + +`freeipa_server__pwpolicies__host_var` / `freeipa_server__pwpolicies__group_var` + +* FreeIPA password policies to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the group the policy applies to. + * Type: String. + + * `maxlife`: + + * Optional. Maximum password lifetime in days. + * Type: Number. + + * `minlife`: + + * Optional. Minimum password lifetime in hours. + * Type: Number. + + * `history`: + + * Optional. Password history size. + * Type: Number. + + * `minclasses`: + + * Optional. Minimum number of character classes. + * Type: Number. + + * `minlength`: + + * Optional. Minimum password length. + * Type: Number. + + * `maxfail`: + + * Optional. Maximum number of consecutive failures before lockout. + * Type: Number. + + * `failinterval`: + + * Optional. Period (in seconds) after which failure count is reset. + * Type: Number. + + * `lockouttime`: + + * Optional. Period (in seconds) for which account is locked. + * Type: Number. + + * `priority`: + + * Optional. Policy priority (lower value = higher priority). + * Type: Number. + + * `state`: + + * Optional. `present` or `absent`. 
Defaults to `present`. + * Type: String. + +`freeipa_server__realm` + +* The kerberos protocol requires a Realm name to be defined. This is typically the domain name converted to uppercase. +* Type: String. +* Default: `'{{ ansible_facts["domain"] | upper }}'` + +`freeipa_server__sudocmdgroups__host_var` / `freeipa_server__sudocmdgroups__group_var` + +* FreeIPA sudo command groups to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the sudo command group. + * Type: String. + + * `description`: + + * Optional. Command group description. + * Type: String. + + * `sudocmds`: + + * Optional. List of sudo command names to add as members. + * Type: List. + + * `state`: + + * Optional. `present` or `absent`. Defaults to `present`. + * Type: String. + +`freeipa_server__sudocmds__host_var` / `freeipa_server__sudocmds__group_var` + +* FreeIPA sudo commands to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Command (e.g. `/usr/bin/less`). + * Type: String. + + * `description`: + + * Optional. Command description. + * Type: String. + + * `state`: + + * Optional. `present` or `absent`. Defaults to `present`. + * Type: String. + +`freeipa_server__sudorules__host_var` / `freeipa_server__sudorules__group_var` + +* FreeIPA sudo rules to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Name of the sudo rule. + * Type: String. + + * `description`: + + * Optional. Rule description. + * Type: String. + + * `usercategory`: + + * Optional. User category (`all`). + * Type: String. + + * `hostcategory`: + + * Optional. Host category (`all`). + * Type: String. 
+ + * `cmdcategory`: + + * Optional. Command category (`all`). + * Type: String. + + * `runasusercategory`: + + * Optional. RunAs user category (`all`). + * Type: String. + + * `runasgroupcategory`: + + * Optional. RunAs group category (`all`). + * Type: String. + + * `users`: + + * Optional. List of user names. + * Type: List. + + * `groups`: + + * Optional. List of group names. + * Type: List. + + * `hosts`: + + * Optional. List of host names. + * Type: List. + + * `hostgroups`: + + * Optional. List of host group names. + * Type: List. + + * `cmds`: + + * Optional. List of sudo command names. + * Type: List. + + * `cmdgroups`: + + * Optional. List of sudo command group names. + * Type: List. + + * `runasusers`: + + * Optional. List of RunAs user names. + * Type: List. + + * `runasgroups`: + + * Optional. List of RunAs group names. + * Type: List. + + * `options`: + + * Optional. List of sudo options (e.g. `!authenticate`). + * Type: List. + + * `order`: + + * Optional. Sudo rule order. + * Type: Number. + + * `state`: + + * Optional. `enabled`, `disabled` or `absent`. Defaults to `enabled`. + * Type: String. + +`freeipa_server__systemd_timeoutstartsec` + +* The `TimeoutStartSec` value for the `pki-tomcatd@.service` systemd override. +* Type: Number. +* Default: `300` + +`freeipa_server__users__host_var` / `freeipa_server__users__group_var` + +* FreeIPA users to manage. +* For the usage in `host_vars` / `group_vars` (can only be used in one group at a time). +* Type: List of dictionaries. +* Default: `[]` +* Subkeys: + + * `name`: + + * Mandatory. Login name. + * Type: String. + + * `first`: + + * Mandatory (for creation). First name. + * Type: String. + + * `last`: + + * Mandatory (for creation). Last name. + * Type: String. + + * `password`: + + * Optional. User password. + * Type: String. + + * `email`: + + * Optional. List of email addresses. + * Type: List. + + * `shell`: + + * Optional. Login shell. + * Type: String. + + * `sshpubkey`: + + * Optional. 
List of SSH public keys. + * Type: List. + + * `groups`: + + * Optional. List of group names the user should be a member of. + * Type: List. + + * `update_password`: + + * Optional. `on_create` or `always`. Defaults to `on_create`. + * Type: String. + + * `state`: + + * Optional. `present` or `absent`. Defaults to `present`. + * Type: String. Example: ```yaml @@ -57,8 +546,69 @@ Example: freeipa_server__config_default_shell: '/bin/bash' freeipa_server__config_password_expiration_notification: 10 freeipa_server__domain: 'example.com' +freeipa_server__groups__host_var: + - name: 'developers' + description: 'Development team' + - name: 'old-group' + state: 'absent' +freeipa_server__hbacrules__host_var: + - name: 'allow_developers_webservers' + users: + - 'developer1' + hostgroups: + - 'webservers' + hbacsvcs: + - 'sshd' + state: 'enabled' +freeipa_server__hostgroups__host_var: + - name: 'webservers' + description: 'Web server hosts' + hosts: + - 'web01.example.com' + - 'web02.example.com' +freeipa_server__ipa_admin_principal: 'admin' +freeipa_server__pwpolicies__host_var: + - name: 'developers' + maxlife: 90 + minlife: 1 + minlength: 12 + minclasses: 3 + priority: 10 freeipa_server__realm: 'EXAMPLE.COM' +freeipa_server__sudocmdgroups__host_var: + - name: 'network_cmds' + description: 'Network commands' + sudocmds: + - '/usr/bin/ip' + - '/usr/sbin/ss' +freeipa_server__sudocmds__host_var: + - name: '/usr/bin/ip' + description: 'IP command' + - name: '/usr/sbin/ss' + description: 'Socket statistics' +freeipa_server__sudorules__host_var: + - name: 'allow_developers_network' + groups: + - 'developers' + hostcategory: 'all' + cmdgroups: + - 'network_cmds' + options: + - '!authenticate' + state: 'enabled' freeipa_server__systemd_timeoutstartsec: 300 +freeipa_server__users__host_var: + - name: 'jdoe' + first: 'John' + last: 'Doe' + email: + - 'jdoe@example.com' + shell: '/bin/bash' + sshpubkey: + - 'ssh-ed25519 AAAAC3...' 
+ groups: + - 'developers' + update_password: 'on_create' ``` diff --git a/roles/freeipa_server/defaults/main.yml b/roles/freeipa_server/defaults/main.yml index 9f3254004..0b97e199e 100644 --- a/roles/freeipa_server/defaults/main.yml +++ b/roles/freeipa_server/defaults/main.yml @@ -1,5 +1,102 @@ freeipa_server__config_default_shell: '/bin/bash' freeipa_server__config_password_expiration_notification: 10 freeipa_server__domain: '{{ ansible_facts["domain"] | lower }}' +freeipa_server__ipa_admin_principal: 'admin' freeipa_server__realm: '{{ ansible_facts["domain"] | upper }}' freeipa_server__systemd_timeoutstartsec: 300 + +freeipa_server__groups__dependent_var: [] +freeipa_server__groups__group_var: [] +freeipa_server__groups__host_var: [] +freeipa_server__groups__role_var: [] +freeipa_server__groups__combined_var: '{{ ( + freeipa_server__groups__role_var + + freeipa_server__groups__dependent_var + + freeipa_server__groups__group_var + + freeipa_server__groups__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__hbacrules__dependent_var: [] +freeipa_server__hbacrules__group_var: [] +freeipa_server__hbacrules__host_var: [] +freeipa_server__hbacrules__role_var: [] +freeipa_server__hbacrules__combined_var: '{{ ( + freeipa_server__hbacrules__role_var + + freeipa_server__hbacrules__dependent_var + + freeipa_server__hbacrules__group_var + + freeipa_server__hbacrules__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__hostgroups__dependent_var: [] +freeipa_server__hostgroups__group_var: [] +freeipa_server__hostgroups__host_var: [] +freeipa_server__hostgroups__role_var: [] +freeipa_server__hostgroups__combined_var: '{{ ( + freeipa_server__hostgroups__role_var + + freeipa_server__hostgroups__dependent_var + + freeipa_server__hostgroups__group_var + + freeipa_server__hostgroups__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__pwpolicies__dependent_var: [] +freeipa_server__pwpolicies__group_var: [] 
+freeipa_server__pwpolicies__host_var: [] +freeipa_server__pwpolicies__role_var: [] +freeipa_server__pwpolicies__combined_var: '{{ ( + freeipa_server__pwpolicies__role_var + + freeipa_server__pwpolicies__dependent_var + + freeipa_server__pwpolicies__group_var + + freeipa_server__pwpolicies__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__sudocmdgroups__dependent_var: [] +freeipa_server__sudocmdgroups__group_var: [] +freeipa_server__sudocmdgroups__host_var: [] +freeipa_server__sudocmdgroups__role_var: [] +freeipa_server__sudocmdgroups__combined_var: '{{ ( + freeipa_server__sudocmdgroups__role_var + + freeipa_server__sudocmdgroups__dependent_var + + freeipa_server__sudocmdgroups__group_var + + freeipa_server__sudocmdgroups__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__sudocmds__dependent_var: [] +freeipa_server__sudocmds__group_var: [] +freeipa_server__sudocmds__host_var: [] +freeipa_server__sudocmds__role_var: [] +freeipa_server__sudocmds__combined_var: '{{ ( + freeipa_server__sudocmds__role_var + + freeipa_server__sudocmds__dependent_var + + freeipa_server__sudocmds__group_var + + freeipa_server__sudocmds__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__sudorules__dependent_var: [] +freeipa_server__sudorules__group_var: [] +freeipa_server__sudorules__host_var: [] +freeipa_server__sudorules__role_var: [] +freeipa_server__sudorules__combined_var: '{{ ( + freeipa_server__sudorules__role_var + + freeipa_server__sudorules__dependent_var + + freeipa_server__sudorules__group_var + + freeipa_server__sudorules__host_var + ) | linuxfabrik.lfops.combine_lod + }}' + +freeipa_server__users__dependent_var: [] +freeipa_server__users__group_var: [] +freeipa_server__users__host_var: [] +freeipa_server__users__role_var: [] +freeipa_server__users__combined_var: '{{ ( + freeipa_server__users__role_var + + freeipa_server__users__dependent_var + + freeipa_server__users__group_var + + freeipa_server__users__host_var + ) 
| linuxfabrik.lfops.combine_lod + }}' diff --git a/roles/freeipa_server/tasks/main.yml b/roles/freeipa_server/tasks/main.yml index 5497c86be..1fd8c589a 100644 --- a/roles/freeipa_server/tasks/main.yml +++ b/roles/freeipa_server/tasks/main.yml @@ -34,22 +34,6 @@ ipaserver_realm: '{{ freeipa_server__realm }}' ipaserver_setup_firewalld: false - # we need to use shell instead of command for the kerberos ticket to work - - name: 'kinit' - ansible.builtin.shell: "echo '{{ freeipa_server__ipa_admin_password }}' | kinit" - - - name: "ipa config-mod --defaultshell='/bin/bash' --pwdexpnotify=10" - ansible.builtin.shell: "ipa config-mod --defaultshell='/bin/bash' --pwdexpnotify=10" - register: 'freeipa_server__ipa_config_mod_result' - changed_when: '"no modifications" not in freeipa_server__ipa_config_mod_result["stderr"]' - failed_when: - - 'freeipa_server__ipa_config_mod_result["failed"] is defined' - - 'freeipa_server__ipa_config_mod_result["failed"]' - - '"no modifications" not in freeipa_server__ipa_config_mod_result["stderr"]' - - - name: 'kdestroy' - ansible.builtin.shell: 'kdestroy' - tags: - 'freeipa_server' @@ -63,6 +47,7 @@ - name: 'deploy /etc/systemd/system/pki-tomcatd@.service.d/override.conf' ansible.builtin.template: + backup: true src: 'etc/systemd/system/pki-tomcatd@.service.d/override.conf.j2' dest: '/etc/systemd/system/pki-tomcatd@.service.d/override.conf' owner: 'root' @@ -71,4 +56,402 @@ tags: - 'freeipa_server' + - 'freeipa_server:configure' - 'freeipa_server:systemd_override' + + +# group management +- block: + + - name: 'Combined Groups:' + ansible.builtin.debug: + var: 'freeipa_server__groups__combined_var' + + - name: 'Remove FreeIPA groups' + # freeipa.ansible_freeipa.ipagroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipagroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ 
item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__groups__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA groups' + # freeipa.ansible_freeipa.ipagroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipagroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + gidnumber: '{{ item["gidnumber"] | default(omit) }}' + nonposix: '{{ item["nonposix"] | default(omit) }}' + external: '{{ item["external"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__groups__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__groups__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:group' + + +# host group management +- block: + + - name: 'Combined Host Groups:' + ansible.builtin.debug: + var: 'freeipa_server__hostgroups__combined_var' + + - name: 'Remove FreeIPA host groups' + # freeipa.ansible_freeipa.ipahostgroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahostgroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__hostgroups__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA host groups' + # freeipa.ansible_freeipa.ipahostgroup: # 
TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahostgroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + host: '{{ item["hosts"] | default(omit) }}' + hostgroup: '{{ item["hostgroups"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__hostgroups__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__hostgroups__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:hostgroup' + + +# user management +- block: + + - name: 'Combined Users:' + ansible.builtin.debug: + var: 'freeipa_server__users__combined_var' + + - name: 'Remove FreeIPA users' + # freeipa.ansible_freeipa.ipauser: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipauser: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__users__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA users' + # freeipa.ansible_freeipa.ipauser: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipauser: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + first: '{{ item["first"] }}' + last: '{{ item["last"] }}' + password: '{{ item["password"] | 
default(omit) }}' + email: '{{ item["email"] | default(omit) }}' + shell: '{{ item["shell"] | default(omit) }}' + sshpubkey: '{{ item["sshpubkey"] | default(omit) }}' + update_password: '{{ item["update_password"] | default("on_create") }}' + state: 'present' + loop: '{{ (freeipa_server__users__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__users__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + # no_log: true + + - name: 'Manage FreeIPA user group memberships' + # freeipa.ansible_freeipa.ipagroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipagroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item[1] }}' + user: + - '{{ item[0]["name"] }}' + action: 'member' + loop: '{{ ((freeipa_server__users__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__users__combined_var | selectattr("state", "undefined") | list)) + | subelements("groups", skip_missing=true) }}' + loop_control: + label: '{{ item[0]["name"] }} -> {{ item[1] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:user' + + +# HBAC rule management +- block: + + - name: 'Combined HBAC Rules:' + ansible.builtin.debug: + var: 'freeipa_server__hbacrules__combined_var' + + - name: 'Remove FreeIPA HBAC rules' + # freeipa.ansible_freeipa.ipahbacrule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahbacrule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__hbacrules__combined_var | selectattr("state", 
"defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA HBAC rules' + # freeipa.ansible_freeipa.ipahbacrule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahbacrule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + usercategory: '{{ item["usercategory"] | default(omit) }}' + hostcategory: '{{ item["hostcategory"] | default(omit) }}' + servicecategory: '{{ item["servicecategory"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__hbacrules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__hbacrules__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Manage FreeIPA HBAC rule members' + # freeipa.ansible_freeipa.ipahbacrule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahbacrule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + user: '{{ item["users"] | default(omit) }}' + group: '{{ item["groups"] | default(omit) }}' + host: '{{ item["hosts"] | default(omit) }}' + hostgroup: '{{ item["hostgroups"] | default(omit) }}' + hbacsvc: '{{ item["hbacsvcs"] | default(omit) }}' + hbacsvcgroup: '{{ item["hbacsvcgroups"] | default(omit) }}' + action: 'member' + loop: '{{ (freeipa_server__hbacrules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__hbacrules__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ 
item["name"] }}' + when: 'item["users"] is defined or item["groups"] is defined or item["hosts"] is defined or item["hostgroups"] is defined or item["hbacsvcs"] is defined or item["hbacsvcgroups"] is defined' + + - name: 'Enable or disable FreeIPA HBAC rules' + # freeipa.ansible_freeipa.ipahbacrule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipahbacrule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: '{{ item["state"] | default("enabled") }}' + loop: '{{ (freeipa_server__hbacrules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__hbacrules__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:hbacrule' + + +# password policy management +- block: + + - name: 'Combined Password Policies:' + ansible.builtin.debug: + var: 'freeipa_server__pwpolicies__combined_var' + + - name: 'Remove FreeIPA password policies' + # freeipa.ansible_freeipa.ipapwpolicy: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipapwpolicy: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__pwpolicies__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA password policies' + # freeipa.ansible_freeipa.ipapwpolicy: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipapwpolicy: + 
ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + maxlife: '{{ item["maxlife"] | default(omit) }}' + minlife: '{{ item["minlife"] | default(omit) }}' + history: '{{ item["history"] | default(omit) }}' + minclasses: '{{ item["minclasses"] | default(omit) }}' + minlength: '{{ item["minlength"] | default(omit) }}' + maxfail: '{{ item["maxfail"] | default(omit) }}' + failinterval: '{{ item["failinterval"] | default(omit) }}' + lockouttime: '{{ item["lockouttime"] | default(omit) }}' + priority: '{{ item["priority"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__pwpolicies__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__pwpolicies__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:pwpolicy' + + +# sudo command and command group management +- block: + + - name: 'Combined Sudo Commands:' + ansible.builtin.debug: + var: 'freeipa_server__sudocmds__combined_var' + + - name: 'Remove FreeIPA sudo commands' + # freeipa.ansible_freeipa.ipasudocmd: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudocmd: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__sudocmds__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA sudo commands' + # freeipa.ansible_freeipa.ipasudocmd: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudocmd: + 
ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__sudocmds__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__sudocmds__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Combined Sudo Command Groups:' + ansible.builtin.debug: + var: 'freeipa_server__sudocmdgroups__combined_var' + + - name: 'Remove FreeIPA sudo command groups' + # freeipa.ansible_freeipa.ipasudocmdgroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudocmdgroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__sudocmdgroups__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA sudo command groups' + # freeipa.ansible_freeipa.ipasudocmdgroup: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudocmdgroup: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + sudocmd: '{{ item["sudocmds"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__sudocmdgroups__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__sudocmdgroups__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + 
label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:sudocmd' + + +# sudo rule management +- block: + + - name: 'Combined Sudo Rules:' + ansible.builtin.debug: + var: 'freeipa_server__sudorules__combined_var' + + - name: 'Remove FreeIPA sudo rules' + # freeipa.ansible_freeipa.ipasudorule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudorule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: 'absent' + loop: '{{ freeipa_server__sudorules__combined_var | selectattr("state", "defined") | selectattr("state", "eq", "absent") | list }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Create or update FreeIPA sudo rules' + # freeipa.ansible_freeipa.ipasudorule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudorule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + description: '{{ item["description"] | default(omit) }}' + usercategory: '{{ item["usercategory"] | default(omit) }}' + hostcategory: '{{ item["hostcategory"] | default(omit) }}' + cmdcategory: '{{ item["cmdcategory"] | default(omit) }}' + runasusercategory: '{{ item["runasusercategory"] | default(omit) }}' + runasgroupcategory: '{{ item["runasgroupcategory"] | default(omit) }}' + order: '{{ item["order"] | default(omit) }}' + state: 'present' + loop: '{{ (freeipa_server__sudorules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__sudorules__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + - name: 'Manage FreeIPA sudo rule members' + # 
freeipa.ansible_freeipa.ipasudorule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudorule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + user: '{{ item["users"] | default(omit) }}' + group: '{{ item["groups"] | default(omit) }}' + host: '{{ item["hosts"] | default(omit) }}' + hostgroup: '{{ item["hostgroups"] | default(omit) }}' + cmd: '{{ item["cmds"] | default(omit) }}' + cmdgroup: '{{ item["cmdgroups"] | default(omit) }}' + runasuser: '{{ item["runasusers"] | default(omit) }}' + runasgroup: '{{ item["runasgroups"] | default(omit) }}' + sudooption: '{{ item["options"] | default(omit) }}' + action: 'member' + loop: '{{ (freeipa_server__sudorules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__sudorules__combined_var | selectattr("state", "undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + when: 'item["users"] is defined or item["groups"] is defined or item["hosts"] is defined or item["hostgroups"] is defined or item["cmds"] is defined or item["cmdgroups"] is defined or item["runasusers"] is defined or item["runasgroups"] is defined or item["options"] is defined' + + - name: 'Enable or disable FreeIPA sudo rules' + # freeipa.ansible_freeipa.ipasudorule: # TODO: switch back once https://github.com/freeipa/ansible-freeipa/pull/1415 is merged and released + linuxfabrik.lfops.ipasudorule: + ipaadmin_password: '{{ freeipa_server__ipa_admin_password }}' + ipaadmin_principal: '{{ freeipa_server__ipa_admin_principal }}' + name: '{{ item["name"] }}' + state: '{{ item["state"] | default("enabled") }}' + loop: '{{ (freeipa_server__sudorules__combined_var | selectattr("state", "defined") | selectattr("state", "ne", "absent") | list) + + (freeipa_server__sudorules__combined_var | selectattr("state", 
"undefined") | list) }}' + loop_control: + label: '{{ item["name"] }}' + + tags: + - 'freeipa_server' + - 'freeipa_server:configure' + - 'freeipa_server:sudorule' diff --git a/roles/github_project_createrepo/README.md b/roles/github_project_createrepo/README.md index f51650513..41f70727a 100644 --- a/roles/github_project_createrepo/README.md +++ b/roles/github_project_createrepo/README.md @@ -14,17 +14,24 @@ If you use the [`github_project_createrepo` Playbook](https://github.com/Linuxfa ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `github_project_createrepo` | Installs and configures github_project_createrepo | - | -| `github_project_createrepo:configure` | Deploys `/etc/github_project_createrepo.yml` | - | +`github_project_createrepo` + +* Installs and configures github_project_createrepo. +* Triggers: none. + +`github_project_createrepo:configure` + +* Deploys `/etc/github_project_createrepo.yml`. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `github_project_createrepo__github_repos` | A list of dictionaries containing GitHub Repository from which the RPM-assets will be downloaded. Subkeys: Have a look at the project's [README](https://github.com/Linuxfabrik/github-project-createrepo/blob/main/README.md#configuration) | +`github_project_createrepo__github_repos` + +* A list of dictionaries containing GitHub Repository from which the RPM-assets will be downloaded. Subkeys: Have a look at the project's [README](https://github.com/Linuxfabrik/github-project-createrepo/blob/main/README.md#configuration). +* Type: List of dictionaries. +* Default: none Example: ```yaml @@ -44,11 +51,23 @@ github_project_createrepo__github_repos: ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `github_project_createrepo__base_path` | Directory under which all the repos will be placed. 
This directory should be served by a webserver. | `'/var/www/html/github-repos'` | -| `github_project_createrepo__timer_enabled` | Enables or disables the github-project-createrepo timer, analogous to `systemctl enable/disable --now`. | `true` | -| `github_project_createrepo__webserver_user` | The user under which the webserver runs. Will be used to set the correct FACL entries so that both users can access the files. | `'apache'` | +`github_project_createrepo__base_path` + +* Directory under which all the repos will be placed. This directory should be served by a webserver. +* Type: String. +* Default: `'/var/www/html/github-repos'` + +`github_project_createrepo__timer_enabled` + +* Enables or disables the github-project-createrepo timer, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`github_project_createrepo__webserver_user` + +* The user under which the webserver runs. Will be used to set the correct FACL entries so that both users can access the files. +* Type: String. +* Default: `'apache'` Example: ```yaml diff --git a/roles/github_project_createrepo/tasks/main.yml b/roles/github_project_createrepo/tasks/main.yml index 7cb659eb3..b6cf937d7 100644 --- a/roles/github_project_createrepo/tasks/main.yml +++ b/roles/github_project_createrepo/tasks/main.yml @@ -108,6 +108,7 @@ - name: 'deploy /etc/github-project-createrepo.yml' ansible.builtin.template: + backup: true src: 'etc/github-project-createrepo.yml.j2' dest: '/etc/github-project-createrepo.yml' owner: 'root' diff --git a/roles/gitlab_ce/README.md b/roles/gitlab_ce/README.md index 892baeca8..b53dc6245 100644 --- a/roles/gitlab_ce/README.md +++ b/roles/gitlab_ce/README.md @@ -15,17 +15,31 @@ If you use the [gitlab_ce Playbook](https://github.com/Linuxfabrik/lfops/blob/ma ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `gitlab_ce` | * `install tar gitlab-ce`
* `mkdir -p /backup/gitlab`
* Deploy `/etc/systemd/system/gitlab-dump.service`
* Deploy `/etc/systemd/system/gitlab-dump.timer`
* `systemctl enable gitlab-dump.timer --now`
* Deploy `/etc/gitlab/gitlab.rb`
* `gitlab-ctl reconfigure`
* `gitlab-ctl restart` | `gitlab-ctl restart` | -| `gitlab_ce:configure` | Same as above, but without install. | `gitlab-ctl restart` | +`gitlab_ce` + +* `install tar gitlab-ce` +* `mkdir -p /backup/gitlab` +* Deploy `/etc/systemd/system/gitlab-dump.service` +* Deploy `/etc/systemd/system/gitlab-dump.timer` +* `systemctl enable gitlab-dump.timer --now` +* Deploy `/etc/gitlab/gitlab.rb` +* `gitlab-ctl reconfigure` +* `gitlab-ctl restart` +* Triggers: `gitlab-ctl restart`. + +`gitlab_ce:configure` + +* Same as above, but without install. +* Triggers: `gitlab-ctl restart`. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `gitlab_ce__rb_external_url` | The URL of your GitLab instance. Currently, only `http://` is supported by this role. If running behind a reverse proxy or on a trusted network, this is good enough. | +`gitlab_ce__rb_external_url` + +* The URL of your GitLab instance. Currently, only `http://` is supported by this role. If running behind a reverse proxy or on a trusted network, this is good enough. +* Type: String. +* Default: none Example: ```yaml @@ -36,49 +50,251 @@ gitlab_ce__rb_external_url: 'http://git.example.com' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `gitlab_ce__on_calendar` | The `OnCalendar` definition for the GitLab Backup. Have a look at `man systemd.time(7)` for the format. | `'*-*-* 23:{{ 59 \| random(seed=inventory_hostname) }}'` | -| `gitlab_ce__rb_git_data_dirs_default_path` | For setting up different data storing directory. If missing, the directory will be created by GitLab. If you want to use a single non-default directory to store git data use a path that doesn't contain symlinks. 
[Docs](https://docs.gitlab.com/omnibus/settings/configuration.html#store-git-data-in-an-alternative-directory) | unset | -| `gitlab_ce__rb_gitlab_rails_backup_keep_time` | The duration in seconds to keep backups before they are allowed to be deleted | `86400` (24h) | -| `gitlab_ce__rb_gitlab_rails_backup_path` | Backup Settings. [Docs](https://docs.gitlab.com/omnibus/settings/backups.html) | `'/backup/gitlab'` | -| `gitlab_ce__rb_gitlab_rails_extra_matomo_site_id` | Extra customization for Matomo | unset | -| `gitlab_ce__rb_gitlab_rails_extra_matomo_url` | Extra customization for Matomo | unset | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_builds` | Whether builds are enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_container_registry` | Whether the container registry is enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_issues` | Whether issues are enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_merge_requests` | Whether merge requests are enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_snippets` | Whether snippets are enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_wiki` | Whether the wiki feature is enabled by default for projects. | `true` | -| `gitlab_ce__rb_gitlab_rails_gitlab_email_display_name` | | `'GitLab@{{ inventory_hostname }}'` | -| `gitlab_ce__rb_gitlab_rails_gitlab_email_from` | If your SMTP server does not like the default 'From: gitlab@gitlab.example.com', you can change the 'From' with this setting. | `'{{ mailto_root__from \| d("") }}'` | -| `gitlab_ce__rb_gitlab_rails_gitlab_email_reply_to` | The 'Reply To' address for emails if it differs from the 'From' address. 
| unset | -| `gitlab_ce__rb_gitlab_rails_ldap_enabled` | Whether the LDAP integration is enabled. [Docs](https://docs.gitlab.com/administration/uploads/#using-local-storage) | `false` | -| `gitlab_ce__rb_gitlab_rails_ldap_servers` | LDAP configuration for one or more servers. [Docs](https://docs.gitlab.com/administration/auth/ldap/?tab=Self-compiled+%28source%29#configure-ldap) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_allow_single_sign_on` | OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_auto_link_ldap_user` | OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_block_auto_created_users` | OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_enabled` | OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_external_providers` | OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_omniauth_providers` | OmniAuth Settings. 
[Docs](https://docs.gitlab.com/ee/integration/omniauth.html) | unset | -| `gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_bantime` | Ban an IP for x seconds after too many auth attempts | `3600` (1h) | -| `gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_enabled` | | `true` | -| `gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_findtime` | Reset the auth attempt counter per IP after x seconds | `60` | -| `gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_ip_whitelist` | | `['127.0.0.1']` | -| `gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_maxretry` | Limit the number of Git HTTP authentication attempts per IP | `10` | -| `gitlab_ce__rb_gitlab_rails_time_zone` | [Docs](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/gitlab.yml.md) | `'Europe/Zurich'` | -| `gitlab_ce__rb_gitlab_rails_uploads_directory` | For setting up a different storage directory for uploads. If missing, the directory will be created by GitLab. [Docs](https://docs.gitlab.com/administration/uploads/#using-local-storage) | `'/var/opt/gitlab/gitlab-rails/uploads'` | -| `gitlab_ce__rb_letsencrypt_enable` | If GitLab should manage Let's Encrypt certificates itself | `false` | -| `gitlab_ce__rb_nginx_listen_https` | Set this to `false` only if your reverse proxy internally communicates over HTTP. [Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl) | `false` | -| `gitlab_ce__rb_nginx_listen_port` | Override only if you use a reverse proxy. [Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#setting-the-nginx-listen-port) | `80` | -| `gitlab_ce__rb_nginx_ssl_certificate_key`` | Path to the SSL certificate key. | unset | -| `gitlab_ce__rb_nginx_ssl_certificate`` | Path to the SSL certificate. | unset | -| `gitlab_ce__rb_registry_external_url` | The URL of the GitLab Container registry. | unset | -| `gitlab_ce__rb_registry_nginx_enable` | Set this to `true` to enable the GitLab Container Registry. 
| unset | -| `gitlab_ce__rb_registry_nginx_listen_https` | Set this to `false` only if your reverse proxy internally communicates over HTTP. [Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl) | `false` | -| `gitlab_ce__rb_registry_nginx_listen_port` | The port on which the Container Registry is listening. | `5050` | -| `gitlab_ce__rb_registry_nginx_proxy_set_headers` | Nginx headers for the Container Registry. | `{'X-Forwarded-Proto': 'https', 'X-Forwarded-Ssl': 'on'}` | -| `gitlab_ce__version` | The GitLab version to install. This is useful when restoring from a backup. When unset, the latest available version is used. | unset | +`gitlab_ce__on_calendar` + +* The `OnCalendar` definition for the GitLab Backup. Have a look at `man systemd.time(7)` for the format. +* Type: String. +* Default: `'*-*-* 23:{{ 59 | random(seed=inventory_hostname) }}'` + +`gitlab_ce__rb_git_data_dirs_default_path` + +* For setting up different data storing directory. If missing, the directory will be created by GitLab. If you want to use a single non-default directory to store git data use a path that doesn't contain symlinks. [Docs](https://docs.gitlab.com/omnibus/settings/configuration.html#store-git-data-in-an-alternative-directory) +* Type: String. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_backup_keep_time` + +* The duration in seconds to keep backups before they are allowed to be deleted. +* Type: Number. +* Default: `86400` + +`gitlab_ce__rb_gitlab_rails_backup_path` + +* Backup Settings. [Docs](https://docs.gitlab.com/omnibus/settings/backups.html) +* Type: String. +* Default: `'/backup/gitlab'` + +`gitlab_ce__rb_gitlab_rails_extra_matomo_site_id` + +* Extra customization for Matomo. +* Type: String. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_extra_matomo_url` + +* Extra customization for Matomo. +* Type: String. 
+* Default: unset + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_builds` + +* Whether builds are enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_container_registry` + +* Whether the container registry is enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_issues` + +* Whether issues are enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_merge_requests` + +* Whether merge requests are enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_snippets` + +* Whether snippets are enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_default_projects_features_wiki` + +* Whether the wiki feature is enabled by default for projects. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_gitlab_email_display_name` + +* The display name used in GitLab emails. +* Type: String. +* Default: `'GitLab@{{ inventory_hostname }}'` + +`gitlab_ce__rb_gitlab_rails_gitlab_email_from` + +* If your SMTP server does not like the default 'From: gitlab@gitlab.example.com', you can change the 'From' with this setting. +* Type: String. +* Default: `'{{ mailto_root__from | d("") }}'` + +`gitlab_ce__rb_gitlab_rails_gitlab_email_reply_to` + +* The 'Reply To' address for emails if it differs from the 'From' address. +* Type: String. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_ldap_enabled` + +* Whether the LDAP integration is enabled. [Docs](https://docs.gitlab.com/administration/uploads/#using-local-storage) +* Type: Bool. +* Default: `false` + +`gitlab_ce__rb_gitlab_rails_ldap_servers` + +* LDAP configuration for one or more servers. 
[Docs](https://docs.gitlab.com/administration/auth/ldap/?tab=Self-compiled+%28source%29#configure-ldap) +* Type: Dictionary. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_allow_single_sign_on` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: List. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_auto_link_ldap_user` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: Bool. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_block_auto_created_users` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: Bool. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_enabled` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: Bool. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_external_providers` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: List. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_omniauth_providers` + +* OmniAuth Settings. [Docs](https://docs.gitlab.com/ee/integration/omniauth.html) +* Type: List of dictionaries. +* Default: unset + +`gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_bantime` + +* Ban an IP for x seconds after too many auth attempts. +* Type: Number. +* Default: `3600` + +`gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_enabled` + +* Whether rack attack for Git basic auth is enabled. +* Type: Bool. +* Default: `true` + +`gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_findtime` + +* Reset the auth attempt counter per IP after x seconds. +* Type: Number. +* Default: `60` + +`gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_ip_whitelist` + +* List of IP addresses to whitelist from rack attack. +* Type: List. 
+* Default: `['127.0.0.1']` + +`gitlab_ce__rb_gitlab_rails_rack_attack_git_basic_auth_maxretry` + +* Limit the number of Git HTTP authentication attempts per IP. +* Type: Number. +* Default: `10` + +`gitlab_ce__rb_gitlab_rails_time_zone` + +* The time zone for GitLab. [Docs](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/gitlab.yml.md) +* Type: String. +* Default: `'Europe/Zurich'` + +`gitlab_ce__rb_gitlab_rails_uploads_directory` + +* For setting up a different storage directory for uploads. If missing, the directory will be created by GitLab. [Docs](https://docs.gitlab.com/administration/uploads/#using-local-storage) +* Type: String. +* Default: `'/var/opt/gitlab/gitlab-rails/uploads'` + +`gitlab_ce__rb_letsencrypt_enable` + +* If GitLab should manage Let's Encrypt certificates itself. +* Type: Bool. +* Default: `false` + +`gitlab_ce__rb_nginx_listen_https` + +* Set this to `false` only if your reverse proxy internally communicates over HTTP. [Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl) +* Type: Bool. +* Default: `false` + +`gitlab_ce__rb_nginx_listen_port` + +* Override only if you use a reverse proxy. [Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#setting-the-nginx-listen-port) +* Type: Number. +* Default: `80` + +`gitlab_ce__rb_nginx_ssl_certificate` + +* Path to the SSL certificate. +* Type: String. +* Default: unset + +`gitlab_ce__rb_nginx_ssl_certificate_key` + +* Path to the SSL certificate key. +* Type: String. +* Default: unset + +`gitlab_ce__rb_registry_external_url` + +* The URL of the GitLab Container registry. +* Type: String. +* Default: unset + +`gitlab_ce__rb_registry_nginx_enable` + +* Set this to `true` to enable the GitLab Container Registry. +* Type: Bool. +* Default: unset + +`gitlab_ce__rb_registry_nginx_listen_https` + +* Set this to `false` only if your reverse proxy internally communicates over HTTP. 
[Docs](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl) +* Type: Bool. +* Default: `false` + +`gitlab_ce__rb_registry_nginx_listen_port` + +* The port on which the Container Registry is listening. +* Type: Number. +* Default: `5050` + +`gitlab_ce__rb_registry_nginx_proxy_set_headers` + +* Nginx headers for the Container Registry. +* Type: Dictionary. +* Default: `{'X-Forwarded-Proto': 'https', 'X-Forwarded-Ssl': 'on'}` + +`gitlab_ce__version` + +* The GitLab version to install. This is useful when restoring from a backup. When unset, the latest available version is used. +* Type: String. +* Default: unset Example (GitLab running on port 80 behind a reverse proxy, offering Google Authentication, with Matomo integration, plus running a registry): ```yaml diff --git a/roles/gitlab_ce/tasks/main.yml b/roles/gitlab_ce/tasks/main.yml index ec0e6ebde..61458f26f 100644 --- a/roles/gitlab_ce/tasks/main.yml +++ b/roles/gitlab_ce/tasks/main.yml @@ -32,6 +32,7 @@ - name: 'Deploy /etc/systemd/system/gitlab-dump.service' ansible.builtin.template: + backup: true src: 'etc/systemd/system/gitlab-dump.service.j2' dest: '/etc/systemd/system/gitlab-dump.service' owner: 'root' @@ -40,6 +41,7 @@ - name: 'Deploy /etc/systemd/system/gitlab-dump.timer' ansible.builtin.template: + backup: true src: 'etc/systemd/system/gitlab-dump.timer.j2' dest: '/etc/systemd/system/gitlab-dump.timer' owner: 'root' @@ -56,6 +58,7 @@ - name: 'Deploy /etc/gitlab/gitlab.rb' ansible.builtin.template: + backup: true src: 'etc/gitlab/gitlab.rb.j2' dest: '/etc/gitlab/gitlab.rb' owner: 'root' diff --git a/roles/glances/README.md b/roles/glances/README.md index 0606a3f48..87a0da1c3 100644 --- a/roles/glances/README.md +++ b/roles/glances/README.md @@ -10,9 +10,10 @@ This role installs [glances](https://nicolargo.github.io/glances/). 
It also alia ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `glances` | Installs glances and configures the alias | - | +`glances` + +* Installs glances and configures the alias. +* Triggers: none. ## License diff --git a/roles/glances/tasks/main.yml b/roles/glances/tasks/main.yml index 5cb2f4078..969ebb3d1 100644 --- a/roles/glances/tasks/main.yml +++ b/roles/glances/tasks/main.yml @@ -8,6 +8,7 @@ - name: 'Deploy /etc/profile.d/glances.sh' ansible.builtin.template: + backup: true src: 'etc/profile.d/glances.sh.j2' dest: '/etc/profile.d/glances.sh' owner: 'root' diff --git a/roles/glpi_agent/README.md b/roles/glpi_agent/README.md index 21065439c..84c5eddb5 100644 --- a/roles/glpi_agent/README.md +++ b/roles/glpi_agent/README.md @@ -12,18 +12,29 @@ If you use the ["GLPI Agent" Playbook](https://github.com/Linuxfabrik/lfops/blob ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `glpi_agent` | Installs and configure GLPI Agent. | Restarts glpi-agent.service | -| `glpi_agent:configure` | Deploys the configuration file. | Restarts glpi-agent.service | -| `glpi_agent:state` | Manages the state of the systemd service. | - | +`glpi_agent` + +* Installs and configures GLPI Agent. +* Triggers: glpi-agent.service restart. + +`glpi_agent:configure` + +* Deploys the configuration file. +* Triggers: glpi-agent.service restart. + +`glpi_agent:state` + +* Manages the state of the systemd service. +* Triggers: none. ## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `glpi_agent__conf_server` | String. Specifies the server to use both as a controller for the agent, and as a recipient for task execution output. | +`glpi_agent__conf_server` + +* Specifies the server to use both as a controller for the agent, and as a recipient for task execution output. +* Type: String.
+* Default: none Example: ```yaml @@ -34,13 +45,35 @@ glpi_agent__conf_server: 'https://glpi.example.com' ``` ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `glpi_agent__conf_local` | String. Write the results of the tasks execution locally. | `'/tmp'` | -| `glpi_agent__conf_no_ssl_check` | Bool. Ignore self-signed certificates of the server. | `false` | -| `glpi_agent__conf_ssl_fingerprint` | Specifies the fingerprint of the ssl server certificate to trust. The fingerprint to use can be retrieved in agent log by temporarily enabling `glpi_agent__conf_no_ssl_check` option. | unset | -| `glpi_agent__service_enabled` | Bool. Enables or disables the service, analogous to `systemctl enable/disable --now`. | `true` | -| `glpi_agent__version` | String. The version of blocky to install. Possible options: `'latest'`, or any from https://github.com/glpi-project/glpi-agent/releases. | `'latest'` | +`glpi_agent__conf_local` + +* Write the results of the tasks execution locally. +* Type: String. +* Default: `'/tmp'` + +`glpi_agent__conf_no_ssl_check` + +* Ignore self-signed certificates of the server. +* Type: Bool. +* Default: `false` + +`glpi_agent__conf_ssl_fingerprint` + +* Specifies the fingerprint of the ssl server certificate to trust. The fingerprint to use can be retrieved in agent log by temporarily enabling `glpi_agent__conf_no_ssl_check` option. +* Type: String. +* Default: unset + +`glpi_agent__service_enabled` + +* Enables or disables the service, analogous to `systemctl enable/disable --now`. +* Type: Bool. +* Default: `true` + +`glpi_agent__version` + +* The version of GLPI Agent to install. Possible options: `'latest'`, or any from https://github.com/glpi-project/glpi-agent/releases. +* Type: String.
+* Default: `'latest'` Example: ```yaml diff --git a/roles/glpi_agent/tasks/main.yml b/roles/glpi_agent/tasks/main.yml index f559c6c02..fa28c603e 100644 --- a/roles/glpi_agent/tasks/main.yml +++ b/roles/glpi_agent/tasks/main.yml @@ -59,6 +59,7 @@ - name: 'Deploy /etc/glpi-agent/conf.d/z00-linuxfabrik.cfg' ansible.builtin.template: + backup: true src: 'etc/glpi-agent/conf.d/z00-linuxfabrik.cfg.j2' dest: '/etc/glpi-agent/conf.d/z00-linuxfabrik.cfg' owner: 'root' diff --git a/roles/grafana/README.md b/roles/grafana/README.md index 7dfbf1d3f..0e3f0dbd4 100644 --- a/roles/grafana/README.md +++ b/roles/grafana/README.md @@ -10,23 +10,48 @@ This role installs and configures [Grafana](https://grafana.com/). ## Tags -| Tag | What it does | Reload / Restart | -| --- | ------------ | ---------------- | -| `grafana` | Installs and configures Grafana | Restarts grafana-server.service | -| `grafana:configure` | Deploys the Grafana config files | Restarts grafana-server.service | -| `grafana:plugins` | Manages Grafana Plugins | Restarts grafana-server.service | -| `grafana:provisioning` | Deploys the Grafana provisioning config files | Restarts grafana-server.service | -| `grafana:service_accounts` | Creates Service Accounts and their tokens | - | -| `grafana:state` | Manages the state of the systemd service | - | +`grafana` + +* Installs and configures Grafana. +* Triggers: grafana-server.service restart. + +`grafana:configure` + +* Deploys the Grafana config files. +* Triggers: grafana-server.service restart. + +`grafana:plugins` + +* Manages Grafana Plugins. +* Triggers: grafana-server.service restart. + +`grafana:provisioning` + +* Deploys the Grafana provisioning config files. +* Triggers: grafana-server.service restart. + +`grafana:service_accounts` + +* Creates Service Accounts and their tokens. +* Triggers: none. + +`grafana:state` + +* Manages the state of the systemd service. +* Triggers: none. 
## Mandatory Role Variables -| Variable | Description | -| -------- | ----------- | -| `grafana__admin_login` | The Grafana admin account. | -| `grafana__root_url` | The root url on which Grafana is reachable. | +`grafana__admin_login` + +* The Grafana admin account. +* Type: Dictionary. +`grafana__root_url` + +* The root url on which Grafana is reachable. +* Type: String. Example: ```yaml @@ -40,28 +65,329 @@ grafana__root_url: 'https://monitoring.example.com/grafana' ## Optional Role Variables -| Variable | Description | Default Value | -| -------- | ----------- | ------------- | -| `grafana__allow_embedding` | Whether to allow browsers to render Grafana in a ``, `